1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24 #include "cpuset-internal.h"
25
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel.h>
29 #include <linux/mempolicy.h>
30 #include <linux/mm.h>
31 #include <linux/memory.h>
32 #include <linux/export.h>
33 #include <linux/rcupdate.h>
34 #include <linux/sched.h>
35 #include <linux/sched/deadline.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/task.h>
38 #include <linux/security.h>
39 #include <linux/oom.h>
40 #include <linux/sched/isolation.h>
41 #include <linux/wait.h>
42 #include <linux/workqueue.h>
43
44 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
45 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
46
47 /*
48 * Abnormal cpuset configurations for CPU or memory node binding
49 * may exist. This key provides a quick, low-cost check for such
50 * a situation.
51 */
52 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
53
54 static const char * const perr_strings[] = {
55 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
56 [PERR_INVPARENT] = "Parent is an invalid partition root",
57 [PERR_NOTPART] = "Parent is not a partition root",
58 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
59 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
60 [PERR_HOTPLUG] = "No cpu available due to hotplug",
61 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
62 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
63 [PERR_ACCESS] = "Enable partition not permitted",
64 [PERR_REMOTE] = "Have remote partition underneath",
65 };
66
67 /*
68 * For local partitions, update to subpartitions_cpus & isolated_cpus is done
69 * in update_parent_effective_cpumask(). For remote partitions, it is done in
70 * the remote_partition_*() and remote_cpus_update() helpers.
71 */
72 /*
73 * Exclusive CPUs distributed out to local or remote sub-partitions of
74 * top_cpuset
75 */
76 static cpumask_var_t subpartitions_cpus;
77
78 /*
79 * Exclusive CPUs in isolated partitions
80 */
81 static cpumask_var_t isolated_cpus;
82
83 /*
84 * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
85 */
86 static cpumask_var_t boot_hk_cpus;
87 static bool have_boot_isolcpus;
88
89 /* List of remote partition root children */
90 static struct list_head remote_children;
91
92 /*
93 * A flag to force sched domain rebuild at the end of an operation.
94 * It can be set in
95 * - update_partition_sd_lb()
96 * - update_cpumasks_hier()
97 * - cpuset_update_flag()
98 * - cpuset_hotplug_update_tasks()
99 * - cpuset_handle_hotplug()
100 *
101 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
102 *
103 * Note that update_relax_domain_level() in cpuset-v1.c can still call
104 * rebuild_sched_domains_locked() directly without using this flag.
105 */
106 static bool force_sd_rebuild;
107
108 /*
109 * Partition root states:
110 *
111 * 0 - member (not a partition root)
112 * 1 - partition root
113 * 2 - partition root without load balancing (isolated)
114 * -1 - invalid partition root
115 * -2 - invalid isolated partition root
116 *
117 * There are 2 types of partitions - local or remote. Local partitions are
118 * those whose parents are partition roots themselves. Setting
119 * cpuset.cpus.exclusive is optional when setting up a local partition.
120 * Remote partitions are those whose parents are not partition roots. Passing
121 * down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
122 * nodes is mandatory when creating a remote partition.
123 *
124 * For simplicity, a local partition can be created under a local or remote
125 * partition but a remote partition cannot have any partition root in its
126 * ancestor chain except the cgroup root.
127 */
128 #define PRS_MEMBER 0
129 #define PRS_ROOT 1
130 #define PRS_ISOLATED 2
131 #define PRS_INVALID_ROOT -1
132 #define PRS_INVALID_ISOLATED -2
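/*
 * Illustrative example (editor's sketch, not part of the kernel source):
 * with cgroup v2, a local partition is typically created by giving a child
 * of an existing partition root some CPUs and then writing "root" or
 * "isolated" to its cpuset.cpus.partition file, roughly:
 *
 *	echo 2-3      > child/cpuset.cpus
 *	echo root     > child/cpuset.cpus.partition	(PRS_ROOT)
 *	echo isolated > child/cpuset.cpus.partition	(PRS_ISOLATED)
 *
 * A remote partition additionally requires cpuset.cpus.exclusive to be set
 * along the ancestor chain, since its parent is not itself a partition root.
 */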
133
134 static inline bool is_prs_invalid(int prs_state)
135 {
136 return prs_state < 0;
137 }
138
139 /*
140 * Temporary cpumasks for working with partitions that are passed among
141 * functions to avoid memory allocation in inner functions.
142 */
143 struct tmpmasks {
144 cpumask_var_t addmask, delmask; /* For partition root */
145 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
146 };
147
148 void inc_dl_tasks_cs(struct task_struct *p)
149 {
150 struct cpuset *cs = task_cs(p);
151
152 cs->nr_deadline_tasks++;
153 }
154
155 void dec_dl_tasks_cs(struct task_struct *p)
156 {
157 struct cpuset *cs = task_cs(p);
158
159 cs->nr_deadline_tasks--;
160 }
161
162 static inline int is_partition_valid(const struct cpuset *cs)
163 {
164 return cs->partition_root_state > 0;
165 }
166
167 static inline int is_partition_invalid(const struct cpuset *cs)
168 {
169 return cs->partition_root_state < 0;
170 }
171
172 /*
173 * Callers should hold callback_lock to modify partition_root_state.
174 */
175 static inline void make_partition_invalid(struct cpuset *cs)
176 {
177 if (cs->partition_root_state > 0)
178 cs->partition_root_state = -cs->partition_root_state;
179 }
180
181 /*
182 * Send a notification event whenever partition_root_state changes.
183 */
184 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
185 {
186 if (old_prs == cs->partition_root_state)
187 return;
188 cgroup_file_notify(&cs->partition_file);
189
190 /* Reset prs_err if not invalid */
191 if (is_partition_valid(cs))
192 WRITE_ONCE(cs->prs_err, PERR_NONE);
193 }
194
195 static struct cpuset top_cpuset = {
196 .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) |
197 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
198 .partition_root_state = PRS_ROOT,
199 .relax_domain_level = -1,
200 .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
201 };
202
203 /*
204 * There are two global locks guarding cpuset structures - cpuset_mutex and
205 * callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
206 * subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
207 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
208 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
209 * correctness.
210 *
211 * A task must hold both locks to modify cpusets. If a task holds
212 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
213 * also acquire callback_lock and be able to modify cpusets. It can perform
214 * various checks on the cpuset structure first, knowing nothing will change.
215 * It can also allocate memory while just holding cpuset_mutex. While it is
216 * performing these checks, various callback routines can briefly acquire
217 * callback_lock to query cpusets. Once it is ready to make the changes, it
218 * takes callback_lock, blocking everyone else.
219 *
220 * Calls to the kernel memory allocator can not be made while holding
221 * callback_lock, as that would risk double tripping on callback_lock
222 * from one of the callbacks into the cpuset code from within
223 * __alloc_pages().
224 *
225 * If a task is only holding callback_lock, then it has read-only
226 * access to cpusets.
227 *
228 * The task_struct fields mems_allowed and mempolicy may be changed by
229 * other tasks, so alloc_lock in the task_struct is used to protect
230 * them.
231 *
232 * The cpuset_common_seq_show() handlers only hold callback_lock across
233 * small pieces of code, such as when reading out possibly multi-word
234 * cpumasks and nodemasks.
235 */
236
237 static DEFINE_MUTEX(cpuset_mutex);
238
239 void cpuset_lock(void)
240 {
241 mutex_lock(&cpuset_mutex);
242 }
243
244 void cpuset_unlock(void)
245 {
246 mutex_unlock(&cpuset_mutex);
247 }
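/*
 * Usage sketch (illustrative, not part of this file): a kernel subsystem
 * that needs cpuset state to stay stable while it runs a check can bracket
 * the access with the helpers above:
 *
 *	cpuset_lock();
 *	... examine state that depends on cpuset configuration ...
 *	cpuset_unlock();
 *
 * Sleeping in between is fine because cpuset_mutex is a mutex, which also
 * matters for the priority-inheritance requirement noted above.
 */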
248
249 static DEFINE_SPINLOCK(callback_lock);
250
251 void cpuset_callback_lock_irq(void)
252 {
253 spin_lock_irq(&callback_lock);
254 }
255
256 void cpuset_callback_unlock_irq(void)
257 {
258 spin_unlock_irq(&callback_lock);
259 }
260
261 static struct workqueue_struct *cpuset_migrate_mm_wq;
262
263 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
264
265 static inline void check_insane_mems_config(nodemask_t *nodes)
266 {
267 if (!cpusets_insane_config() &&
268 movable_only_nodes(nodes)) {
269 static_branch_enable(&cpusets_insane_config_key);
270 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
271 "Cpuset allocations might fail even with a lot of memory available.\n",
272 nodemask_pr_args(nodes));
273 }
274 }
275
276 /*
277 * decrease cs->attach_in_progress.
278 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
279 */
280 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
281 {
282 lockdep_assert_held(&cpuset_mutex);
283
284 cs->attach_in_progress--;
285 if (!cs->attach_in_progress)
286 wake_up(&cpuset_attach_wq);
287 }
288
289 static inline void dec_attach_in_progress(struct cpuset *cs)
290 {
291 mutex_lock(&cpuset_mutex);
292 dec_attach_in_progress_locked(cs);
293 mutex_unlock(&cpuset_mutex);
294 }
295
296 static inline bool cpuset_v2(void)
297 {
298 return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
299 cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
300 }
301
302 /*
303 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
304 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
305 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
306 * With v2 behavior, "cpus" and "mems" are always what the users have
307 * requested and won't be changed by hotplug events. Only the effective
308 * cpus or mems will be affected.
309 */
310 static inline bool is_in_v2_mode(void)
311 {
312 return cpuset_v2() ||
313 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
314 }
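/*
 * Example (illustrative): the v2 behavior described above can be requested
 * on a v1 hierarchy by mounting the cpuset controller with the
 * "cpuset_v2_mode" option, e.g.
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 *
 * in which case "cpus" and "mems" keep the user-requested values across
 * hotplug events and only the effective masks change.
 */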
315
316 /**
317 * partition_is_populated - check if partition has tasks
318 * @cs: partition root to be checked
319 * @excluded_child: a child cpuset to be excluded in task checking
320 * Return: true if there are tasks, false otherwise
321 *
322 * It is assumed that @cs is a valid partition root. @excluded_child should
323 * be non-NULL when this cpuset is going to become a partition itself.
324 */
325 static inline bool partition_is_populated(struct cpuset *cs,
326 struct cpuset *excluded_child)
327 {
328 struct cgroup_subsys_state *css;
329 struct cpuset *child;
330
331 if (cs->css.cgroup->nr_populated_csets)
332 return true;
333 if (!excluded_child && !cs->nr_subparts)
334 return cgroup_is_populated(cs->css.cgroup);
335
336 rcu_read_lock();
337 cpuset_for_each_child(child, css, cs) {
338 if (child == excluded_child)
339 continue;
340 if (is_partition_valid(child))
341 continue;
342 if (cgroup_is_populated(child->css.cgroup)) {
343 rcu_read_unlock();
344 return true;
345 }
346 }
347 rcu_read_unlock();
348 return false;
349 }
350
351 /*
352 * Return in pmask the portion of a task's cpuset's cpus_allowed that
353 * are online and are capable of running the task. If none are found,
354 * walk up the cpuset hierarchy until we find one that does have some
355 * appropriate cpus.
356 *
357 * One way or another, we guarantee to return some non-empty subset
358 * of cpu_online_mask.
359 *
360 * Call with callback_lock or cpuset_mutex held.
361 */
362 static void guarantee_online_cpus(struct task_struct *tsk,
363 struct cpumask *pmask)
364 {
365 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
366 struct cpuset *cs;
367
368 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
369 cpumask_copy(pmask, cpu_online_mask);
370
371 rcu_read_lock();
372 cs = task_cs(tsk);
373
374 while (!cpumask_intersects(cs->effective_cpus, pmask))
375 cs = parent_cs(cs);
376
377 cpumask_and(pmask, pmask, cs->effective_cpus);
378 rcu_read_unlock();
379 }
380
381 /*
382 * Return in *pmask the portion of a cpuset's mems_allowed that
383 * are online, with memory. If none are online with memory, walk
384 * up the cpuset hierarchy until we find one that does have some
385 * online mems. The top cpuset always has some mems online.
386 *
387 * One way or another, we guarantee to return some non-empty subset
388 * of node_states[N_MEMORY].
389 *
390 * Call with callback_lock or cpuset_mutex held.
391 */
392 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
393 {
394 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
395 cs = parent_cs(cs);
396 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
397 }
398
399 /**
400 * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
401 * @cs: the cpuset that has cpumasks to be allocated.
402 * @tmp: the tmpmasks structure pointer
403 * Return: 0 if successful, -ENOMEM otherwise.
404 *
405 * Only one of the two input arguments should be non-NULL.
406 */
407 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
408 {
409 cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
410
411 if (cs) {
412 pmask1 = &cs->cpus_allowed;
413 pmask2 = &cs->effective_cpus;
414 pmask3 = &cs->effective_xcpus;
415 pmask4 = &cs->exclusive_cpus;
416 } else {
417 pmask1 = &tmp->new_cpus;
418 pmask2 = &tmp->addmask;
419 pmask3 = &tmp->delmask;
420 pmask4 = NULL;
421 }
422
423 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
424 return -ENOMEM;
425
426 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
427 goto free_one;
428
429 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
430 goto free_two;
431
432 if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
433 goto free_three;
434
435
436 return 0;
437
438 free_three:
439 free_cpumask_var(*pmask3);
440 free_two:
441 free_cpumask_var(*pmask2);
442 free_one:
443 free_cpumask_var(*pmask1);
444 return -ENOMEM;
445 }
446
447 /**
448 * free_cpumasks - free cpumasks of a cpuset or a tmpmasks structure
449 * @cs: the cpuset that has cpumasks to be freed.
450 * @tmp: the tmpmasks structure pointer
451 */
452 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
453 {
454 if (cs) {
455 free_cpumask_var(cs->cpus_allowed);
456 free_cpumask_var(cs->effective_cpus);
457 free_cpumask_var(cs->effective_xcpus);
458 free_cpumask_var(cs->exclusive_cpus);
459 }
460 if (tmp) {
461 free_cpumask_var(tmp->new_cpus);
462 free_cpumask_var(tmp->addmask);
463 free_cpumask_var(tmp->delmask);
464 }
465 }
466
467 /**
468 * alloc_trial_cpuset - allocate a trial cpuset
469 * @cs: the cpuset that the trial cpuset duplicates
470 */
471 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
472 {
473 struct cpuset *trial;
474
475 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
476 if (!trial)
477 return NULL;
478
479 if (alloc_cpumasks(trial, NULL)) {
480 kfree(trial);
481 return NULL;
482 }
483
484 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
485 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
486 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
487 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
488 return trial;
489 }
490
491 /**
492 * free_cpuset - free the cpuset
493 * @cs: the cpuset to be freed
494 */
495 static inline void free_cpuset(struct cpuset *cs)
496 {
497 free_cpumasks(cs, NULL);
498 kfree(cs);
499 }
500
501 /* Return user specified exclusive CPUs */
502 static inline struct cpumask *user_xcpus(struct cpuset *cs)
503 {
504 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
505 : cs->exclusive_cpus;
506 }
507
508 static inline bool xcpus_empty(struct cpuset *cs)
509 {
510 return cpumask_empty(cs->cpus_allowed) &&
511 cpumask_empty(cs->exclusive_cpus);
512 }
513
514 /*
515 * cpusets_are_exclusive() - check if two cpusets are exclusive
516 *
517 * Return true if exclusive, false if not
518 */
519 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
520 {
521 struct cpumask *xcpus1 = user_xcpus(cs1);
522 struct cpumask *xcpus2 = user_xcpus(cs2);
523
524 if (cpumask_intersects(xcpus1, xcpus2))
525 return false;
526 return true;
527 }
528
529 /*
530 * validate_change() - Used to validate that any proposed cpuset change
531 * follows the structural rules for cpusets.
532 *
533 * If we replaced the flag and mask values of the current cpuset
534 * (cur) with those values in the trial cpuset (trial), would
535 * our various subset and exclusive rules still be valid? Presumes
536 * cpuset_mutex held.
537 *
538 * 'cur' is the address of an actual, in-use cpuset. Operations
539 * such as list traversal that depend on the actual address of the
540 * cpuset in the list must use cur below, not trial.
541 *
542 * 'trial' is the address of bulk structure copy of cur, with
543 * perhaps one or more of the fields cpus_allowed, mems_allowed,
544 * or flags changed to new, trial values.
545 *
546 * Return 0 if valid, -errno if not.
547 */
548
549 static int validate_change(struct cpuset *cur, struct cpuset *trial)
550 {
551 struct cgroup_subsys_state *css;
552 struct cpuset *c, *par;
553 int ret = 0;
554
555 rcu_read_lock();
556
557 if (!is_in_v2_mode())
558 ret = cpuset1_validate_change(cur, trial);
559 if (ret)
560 goto out;
561
562 /* Remaining checks don't apply to root cpuset */
563 if (cur == &top_cpuset)
564 goto out;
565
566 par = parent_cs(cur);
567
568 /*
569 * Cpusets with tasks - existing or newly being attached - can't
570 * be changed to have empty cpus_allowed or mems_allowed.
571 */
572 ret = -ENOSPC;
573 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
574 if (!cpumask_empty(cur->cpus_allowed) &&
575 cpumask_empty(trial->cpus_allowed))
576 goto out;
577 if (!nodes_empty(cur->mems_allowed) &&
578 nodes_empty(trial->mems_allowed))
579 goto out;
580 }
581
582 /*
583 * We can't shrink if we won't have enough room for SCHED_DEADLINE
584 * tasks. This check is not done when scheduling is disabled as the
585 * users should know what they are doing.
586 *
587 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
588 * cpus_allowed.
589 *
590 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
591 * for non-isolated partition root. At this point, the target
592 * effective_cpus isn't computed yet. user_xcpus() is the best
593 * approximation.
594 *
595 * TBD: May need to precompute the real effective_cpus here in case
596 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
597 * becomes an issue.
598 */
599 ret = -EBUSY;
600 if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
601 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
602 goto out;
603
604 /*
605 * If either I or some sibling (!= me) is exclusive, we can't
606 * overlap. Sibling exclusive_cpus masks, if set, cannot overlap with each other.
607 */
608 ret = -EINVAL;
609 cpuset_for_each_child(c, css, par) {
610 bool txset, cxset; /* Are exclusive_cpus set? */
611
612 if (c == cur)
613 continue;
614
615 txset = !cpumask_empty(trial->exclusive_cpus);
616 cxset = !cpumask_empty(c->exclusive_cpus);
617 if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
618 (txset && cxset)) {
619 if (!cpusets_are_exclusive(trial, c))
620 goto out;
621 } else if (txset || cxset) {
622 struct cpumask *xcpus, *acpus;
623
624 /*
625 * When only one of the two exclusive_cpus masks is set,
626 * the cpus_allowed of the other cpuset, if non-empty, must
627 * not be a subset of it; otherwise none of those CPUs will
628 * be available once the exclusive CPUs are activated.
629 */
630 if (txset) {
631 xcpus = trial->exclusive_cpus;
632 acpus = c->cpus_allowed;
633 } else {
634 xcpus = c->exclusive_cpus;
635 acpus = trial->cpus_allowed;
636 }
637 if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
638 goto out;
639 }
640 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
641 nodes_intersects(trial->mems_allowed, c->mems_allowed))
642 goto out;
643 }
644
645 ret = 0;
646 out:
647 rcu_read_unlock();
648 return ret;
649 }
650
651 #ifdef CONFIG_SMP
652 /*
653 * Helper routine for generate_sched_domains().
654 * Do cpusets a, b have overlapping effective_cpus masks?
655 */
656 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
657 {
658 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
659 }
660
661 static void
662 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
663 {
664 if (dattr->relax_domain_level < c->relax_domain_level)
665 dattr->relax_domain_level = c->relax_domain_level;
666 return;
667 }
668
669 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
670 struct cpuset *root_cs)
671 {
672 struct cpuset *cp;
673 struct cgroup_subsys_state *pos_css;
674
675 rcu_read_lock();
676 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
677 /* skip the whole subtree if @cp doesn't have any CPU */
678 if (cpumask_empty(cp->cpus_allowed)) {
679 pos_css = css_rightmost_descendant(pos_css);
680 continue;
681 }
682
683 if (is_sched_load_balance(cp))
684 update_domain_attr(dattr, cp);
685 }
686 rcu_read_unlock();
687 }
688
689 /* Must be called with cpuset_mutex held. */
690 static inline int nr_cpusets(void)
691 {
692 /* jump label reference count + the top-level cpuset */
693 return static_key_count(&cpusets_enabled_key.key) + 1;
694 }
695
696 /*
697 * generate_sched_domains()
698 *
699 * This function builds a partial partition of the system's CPUs.
700 * A 'partial partition' is a set of non-overlapping subsets whose
701 * union is a subset of that set.
702 * The output of this function needs to be passed to kernel/sched/core.c
703 * partition_sched_domains() routine, which will rebuild the scheduler's
704 * load balancing domains (sched domains) as specified by that partial
705 * partition.
706 *
707 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
708 * for a background explanation of this.
709 *
710 * Does not return errors, on the theory that the callers of this
711 * routine would rather not worry about failures to rebuild sched
712 * domains when operating in the severe memory shortage situations
713 * that could cause allocation failures below.
714 *
715 * Must be called with cpuset_mutex held.
716 *
717 * The three key local variables below are:
718 * cp - cpuset pointer, used (together with pos_css) to perform a
719 * top-down scan of all cpusets. For our purposes, rebuilding
720 * the schedulers sched domains, we can ignore !is_sched_load_
721 * balance cpusets.
722 * csa - (for CpuSet Array) Array of pointers to all the cpusets
723 * that need to be load balanced, for convenient iterative
724 * access by the subsequent code that finds the best partition,
725 * i.e. the set of domains (subsets) of CPUs such that the
726 * cpus_allowed of every cpuset marked is_sched_load_balance
727 * is a subset of one of these domains, while there are as
728 * many such domains as possible, each as small as possible.
729 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
730 * the kernel/sched/core.c routine partition_sched_domains() in a
731 * convenient format, that can be easily compared to the prior
732 * value to determine what partition elements (sched domains)
733 * were changed (added or removed.)
734 *
735 * Finding the best partition (set of domains):
736 * The double nested loops below over i, j scan over the load
737 * balanced cpusets (using the array of cpuset pointers in csa[])
738 * looking for pairs of cpusets that have overlapping cpus_allowed
739 * and merging them using a union-find algorithm.
740 *
741 * The union of the cpus_allowed masks from the set of all cpusets
742 * having the same root then form the one element of the partition
743 * (one sched domain) to be passed to partition_sched_domains().
744 *
745 */
746 static int generate_sched_domains(cpumask_var_t **domains,
747 struct sched_domain_attr **attributes)
748 {
749 struct cpuset *cp; /* top-down scan of cpusets */
750 struct cpuset **csa; /* array of all cpuset ptrs */
751 int csn; /* how many cpuset ptrs in csa so far */
752 int i, j; /* indices for partition finding loops */
753 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
754 struct sched_domain_attr *dattr; /* attributes for custom domains */
755 int ndoms = 0; /* number of sched domains in result */
756 int nslot; /* next empty doms[] struct cpumask slot */
757 struct cgroup_subsys_state *pos_css;
758 bool root_load_balance = is_sched_load_balance(&top_cpuset);
759 bool cgrpv2 = cpuset_v2();
760 int nslot_update;
761
762 doms = NULL;
763 dattr = NULL;
764 csa = NULL;
765
766 /* Special case for the 99% of systems with one, full, sched domain */
767 if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
768 single_root_domain:
769 ndoms = 1;
770 doms = alloc_sched_domains(ndoms);
771 if (!doms)
772 goto done;
773
774 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
775 if (dattr) {
776 *dattr = SD_ATTR_INIT;
777 update_domain_attr_tree(dattr, &top_cpuset);
778 }
779 cpumask_and(doms[0], top_cpuset.effective_cpus,
780 housekeeping_cpumask(HK_TYPE_DOMAIN));
781
782 goto done;
783 }
784
785 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
786 if (!csa)
787 goto done;
788 csn = 0;
789
790 rcu_read_lock();
791 if (root_load_balance)
792 csa[csn++] = &top_cpuset;
793 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
794 if (cp == &top_cpuset)
795 continue;
796
797 if (cgrpv2)
798 goto v2;
799
800 /*
801 * v1:
802 * Continue traversing beyond @cp iff @cp has some CPUs and
803 * isn't load balancing. The former is obvious. The
804 * latter: All child cpusets contain a subset of the
805 * parent's cpus, so just skip them, and then we call
806 * update_domain_attr_tree() to calc relax_domain_level of
807 * the corresponding sched domain.
808 */
809 if (!cpumask_empty(cp->cpus_allowed) &&
810 !(is_sched_load_balance(cp) &&
811 cpumask_intersects(cp->cpus_allowed,
812 housekeeping_cpumask(HK_TYPE_DOMAIN))))
813 continue;
814
815 if (is_sched_load_balance(cp) &&
816 !cpumask_empty(cp->effective_cpus))
817 csa[csn++] = cp;
818
819 /* skip @cp's subtree */
820 pos_css = css_rightmost_descendant(pos_css);
821 continue;
822
823 v2:
824 /*
825 * Only valid partition roots that are not isolated and with
826 * non-empty effective_cpus will be saved into csa[].
827 */
828 if ((cp->partition_root_state == PRS_ROOT) &&
829 !cpumask_empty(cp->effective_cpus))
830 csa[csn++] = cp;
831
832 /*
833 * Skip @cp's subtree if not a partition root and has no
834 * exclusive CPUs to be granted to child cpusets.
835 */
836 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
837 pos_css = css_rightmost_descendant(pos_css);
838 }
839 rcu_read_unlock();
840
841 /*
842 * If there are only isolated partitions underneath the cgroup root,
843 * we can optimize out unneeded sched domains scanning.
844 */
845 if (root_load_balance && (csn == 1))
846 goto single_root_domain;
847
848 for (i = 0; i < csn; i++)
849 uf_node_init(&csa[i]->node);
850
851 /* Merge overlapping cpusets */
852 for (i = 0; i < csn; i++) {
853 for (j = i + 1; j < csn; j++) {
854 if (cpusets_overlap(csa[i], csa[j])) {
855 /*
856 * Cgroup v2 shouldn't pass down overlapping
857 * partition root cpusets.
858 */
859 WARN_ON_ONCE(cgrpv2);
860 uf_union(&csa[i]->node, &csa[j]->node);
861 }
862 }
863 }
864
865 /* Count the total number of domains */
866 for (i = 0; i < csn; i++) {
867 if (uf_find(&csa[i]->node) == &csa[i]->node)
868 ndoms++;
869 }
870
871 /*
872 * Now we know how many domains to create.
873 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
874 */
875 doms = alloc_sched_domains(ndoms);
876 if (!doms)
877 goto done;
878
879 /*
880 * The rest of the code, including the scheduler, can deal with
881 * dattr==NULL case. No need to abort if alloc fails.
882 */
883 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
884 GFP_KERNEL);
885
886 /*
887 * Cgroup v2 doesn't support domain attributes, just set all of them
888 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
889 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
890 */
891 if (cgrpv2) {
892 for (i = 0; i < ndoms; i++) {
893 /*
894 * The top cpuset may contain some boot time isolated
895 * CPUs that need to be excluded from the sched domain.
896 */
897 if (csa[i] == &top_cpuset)
898 cpumask_and(doms[i], csa[i]->effective_cpus,
899 housekeeping_cpumask(HK_TYPE_DOMAIN));
900 else
901 cpumask_copy(doms[i], csa[i]->effective_cpus);
902 if (dattr)
903 dattr[i] = SD_ATTR_INIT;
904 }
905 goto done;
906 }
907
908 for (nslot = 0, i = 0; i < csn; i++) {
909 nslot_update = 0;
910 for (j = i; j < csn; j++) {
911 if (uf_find(&csa[j]->node) == &csa[i]->node) {
912 struct cpumask *dp = doms[nslot];
913
914 if (i == j) {
915 nslot_update = 1;
916 cpumask_clear(dp);
917 if (dattr)
918 *(dattr + nslot) = SD_ATTR_INIT;
919 }
920 cpumask_or(dp, dp, csa[j]->effective_cpus);
921 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
922 if (dattr)
923 update_domain_attr_tree(dattr + nslot, csa[j]);
924 }
925 }
926 if (nslot_update)
927 nslot++;
928 }
929 BUG_ON(nslot != ndoms);
930
931 done:
932 kfree(csa);
933
934 /*
935 * Fallback to the default domain if kmalloc() failed.
936 * See comments in partition_sched_domains().
937 */
938 if (doms == NULL)
939 ndoms = 1;
940
941 *domains = doms;
942 *attributes = dattr;
943 return ndoms;
944 }
945
946 static void dl_update_tasks_root_domain(struct cpuset *cs)
947 {
948 struct css_task_iter it;
949 struct task_struct *task;
950
951 if (cs->nr_deadline_tasks == 0)
952 return;
953
954 css_task_iter_start(&cs->css, 0, &it);
955
956 while ((task = css_task_iter_next(&it)))
957 dl_add_task_root_domain(task);
958
959 css_task_iter_end(&it);
960 }
961
962 void dl_rebuild_rd_accounting(void)
963 {
964 struct cpuset *cs = NULL;
965 struct cgroup_subsys_state *pos_css;
966 int cpu;
967 u64 cookie = ++dl_cookie;
968
969 lockdep_assert_held(&cpuset_mutex);
970 lockdep_assert_cpus_held();
971 lockdep_assert_held(&sched_domains_mutex);
972
973 rcu_read_lock();
974
975 for_each_possible_cpu(cpu) {
976 if (dl_bw_visited(cpu, cookie))
977 continue;
978
979 dl_clear_root_domain_cpu(cpu);
980 }
981
982 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
983
984 if (cpumask_empty(cs->effective_cpus)) {
985 pos_css = css_rightmost_descendant(pos_css);
986 continue;
987 }
988
989 css_get(&cs->css);
990
991 rcu_read_unlock();
992
993 dl_update_tasks_root_domain(cs);
994
995 rcu_read_lock();
996 css_put(&cs->css);
997 }
998 rcu_read_unlock();
999 }
1000
1001 /*
1002 * Rebuild scheduler domains.
1003 *
1004 * If the flag 'sched_load_balance' of any cpuset with non-empty
1005 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1006 * which has that flag enabled, or if any cpuset with a non-empty
1007 * 'cpus' is removed, then call this routine to rebuild the
1008 * scheduler's dynamic sched domains.
1009 *
1010 * Call with cpuset_mutex held. Takes cpus_read_lock().
1011 */
1012 void rebuild_sched_domains_locked(void)
1013 {
1014 struct cgroup_subsys_state *pos_css;
1015 struct sched_domain_attr *attr;
1016 cpumask_var_t *doms;
1017 struct cpuset *cs;
1018 int ndoms;
1019
1020 lockdep_assert_cpus_held();
1021 lockdep_assert_held(&cpuset_mutex);
1022 force_sd_rebuild = false;
1023
1024 /*
1025 * If we have raced with CPU hotplug, return early to avoid
1026 * passing doms with offlined cpu to partition_sched_domains().
1027 * Anyway, cpuset_handle_hotplug() will rebuild the sched domains.
1028 *
1029 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1030 * should be the same as the active CPUs, so checking only top_cpuset
1031 * is enough to detect racing CPU offlines.
1032 */
1033 if (cpumask_empty(subpartitions_cpus) &&
1034 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1035 return;
1036
1037 /*
1038 * With subpartition CPUs, however, the effective CPUs of a partition
1039 * root should be only a subset of the active CPUs. Since a CPU in any
1040 * partition root could be offlined, all must be checked.
1041 */
1042 if (!cpumask_empty(subpartitions_cpus)) {
1043 rcu_read_lock();
1044 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1045 if (!is_partition_valid(cs)) {
1046 pos_css = css_rightmost_descendant(pos_css);
1047 continue;
1048 }
1049 if (!cpumask_subset(cs->effective_cpus,
1050 cpu_active_mask)) {
1051 rcu_read_unlock();
1052 return;
1053 }
1054 }
1055 rcu_read_unlock();
1056 }
1057
1058 /* Generate domain masks and attrs */
1059 ndoms = generate_sched_domains(&doms, &attr);
1060
1061 /* Have scheduler rebuild the domains */
1062 partition_sched_domains(ndoms, doms, attr);
1063 }
1064 #else /* !CONFIG_SMP */
1065 void rebuild_sched_domains_locked(void)
1066 {
1067 }
1068 #endif /* CONFIG_SMP */
1069
1070 static void rebuild_sched_domains_cpuslocked(void)
1071 {
1072 mutex_lock(&cpuset_mutex);
1073 rebuild_sched_domains_locked();
1074 mutex_unlock(&cpuset_mutex);
1075 }
1076
1077 void rebuild_sched_domains(void)
1078 {
1079 cpus_read_lock();
1080 rebuild_sched_domains_cpuslocked();
1081 cpus_read_unlock();
1082 }
1083
1084 void cpuset_reset_sched_domains(void)
1085 {
1086 mutex_lock(&cpuset_mutex);
1087 partition_sched_domains(1, NULL, NULL);
1088 mutex_unlock(&cpuset_mutex);
1089 }
1090
1091 /**
1092 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1093 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1094 * @new_cpus: the temp variable for the new effective_cpus mask
1095 *
1096 * Iterate through each task of @cs updating its cpus_allowed to the
1097 * effective cpuset's. As this function is called with cpuset_mutex held,
1098 * cpuset membership stays stable.
1099 *
1100 * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1101 * to make sure all offline CPUs are also included as hotplug code won't
1102 * update cpumasks for tasks in top_cpuset.
1103 *
1104 * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1105 * do cpu masking per task instead of doing it once for all.
1106 */
1107 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1108 {
1109 struct css_task_iter it;
1110 struct task_struct *task;
1111 bool top_cs = cs == &top_cpuset;
1112
1113 css_task_iter_start(&cs->css, 0, &it);
1114 while ((task = css_task_iter_next(&it))) {
1115 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1116
1117 if (top_cs) {
1118 /*
1119 * Percpu kthreads in top_cpuset are ignored
1120 */
1121 if (kthread_is_per_cpu(task))
1122 continue;
1123 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1124 } else {
1125 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1126 }
1127 set_cpus_allowed_ptr(task, new_cpus);
1128 }
1129 css_task_iter_end(&it);
1130 }
1131
1132 /**
1133 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1134 * @new_cpus: the temp variable for the new effective_cpus mask
1135 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1136 * @parent: the parent cpuset
1137 *
1138 * The result is valid only if the given cpuset isn't a partition root.
1139 */
1140 static void compute_effective_cpumask(struct cpumask *new_cpus,
1141 struct cpuset *cs, struct cpuset *parent)
1142 {
1143 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1144 }
1145
1146 /*
1147 * Commands for update_parent_effective_cpumask
1148 */
1149 enum partition_cmd {
1150 partcmd_enable, /* Enable partition root */
1151 partcmd_enablei, /* Enable isolated partition root */
1152 partcmd_disable, /* Disable partition root */
1153 partcmd_update, /* Update parent's effective_cpus */
1154 partcmd_invalidate, /* Make partition invalid */
1155 };
1156
1157 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1158 struct tmpmasks *tmp);
1159
1160 /*
1161 * Update partition exclusive flag
1162 *
1163 * Return: 0 if successful, an error code otherwise
1164 */
1165 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1166 {
1167 bool exclusive = (new_prs > PRS_MEMBER);
1168
1169 if (exclusive && !is_cpu_exclusive(cs)) {
1170 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1171 return PERR_NOTEXCL;
1172 } else if (!exclusive && is_cpu_exclusive(cs)) {
1173 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1174 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1175 }
1176 return 0;
1177 }
1178
1179 /*
1180 * Update partition load balance flag and/or rebuild sched domain
1181 *
1182 * Changing load balance flag will automatically call
1183 * rebuild_sched_domains_locked().
1184 * This function is for cgroup v2 only.
1185 */
1186 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1187 {
1188 int new_prs = cs->partition_root_state;
1189 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1190 bool new_lb;
1191
1192 /*
1193 * If cs is not a valid partition root, the load balance state
1194 * will follow its parent.
1195 */
1196 if (new_prs > 0) {
1197 new_lb = (new_prs != PRS_ISOLATED);
1198 } else {
1199 new_lb = is_sched_load_balance(parent_cs(cs));
1200 }
1201 if (new_lb != !!is_sched_load_balance(cs)) {
1202 rebuild_domains = true;
1203 if (new_lb)
1204 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1205 else
1206 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1207 }
1208
1209 if (rebuild_domains)
1210 cpuset_force_rebuild();
1211 }
1212
1213 /*
1214 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1215 */
1216 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1217 struct cpumask *xcpus)
1218 {
1219 /*
1220 * A populated partition (cs or parent) can't have empty effective_cpus
1221 */
1222 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1223 partition_is_populated(parent, cs)) ||
1224 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1225 partition_is_populated(cs, NULL));
1226 }
1227
1228 static void reset_partition_data(struct cpuset *cs)
1229 {
1230 struct cpuset *parent = parent_cs(cs);
1231
1232 if (!cpuset_v2())
1233 return;
1234
1235 lockdep_assert_held(&callback_lock);
1236
1237 cs->nr_subparts = 0;
1238 if (cpumask_empty(cs->exclusive_cpus)) {
1239 cpumask_clear(cs->effective_xcpus);
1240 if (is_cpu_exclusive(cs))
1241 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1242 }
1243 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1244 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1245 }
1246
1247 /*
1248 * isolated_cpus_update - Update the isolated_cpus mask
1249 * @old_prs: old partition_root_state
1250 * @new_prs: new partition_root_state
1251 * @xcpus: exclusive CPUs with state change
1252 */
1253 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1254 {
1255 WARN_ON_ONCE(old_prs == new_prs);
1256 if (new_prs == PRS_ISOLATED)
1257 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1258 else
1259 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1260 }
1261
1262 /*
1263 * partition_xcpus_add - Add new exclusive CPUs to partition
1264 * @new_prs: new partition_root_state
1265 * @parent: parent cpuset
1266 * @xcpus: exclusive CPUs to be added
1267 * Return: true if isolated_cpus modified, false otherwise
1268 *
1269 * Remote partition if parent == NULL
1270 */
1271 static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
1272 struct cpumask *xcpus)
1273 {
1274 bool isolcpus_updated;
1275
1276 WARN_ON_ONCE(new_prs < 0);
1277 lockdep_assert_held(&callback_lock);
1278 if (!parent)
1279 parent = &top_cpuset;
1280
1281
1282 if (parent == &top_cpuset)
1283 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1284
1285 isolcpus_updated = (new_prs != parent->partition_root_state);
1286 if (isolcpus_updated)
1287 isolated_cpus_update(parent->partition_root_state, new_prs,
1288 xcpus);
1289
1290 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1291 return isolcpus_updated;
1292 }
1293
1294 /*
1295 * partition_xcpus_del - Remove exclusive CPUs from partition
1296 * @old_prs: old partition_root_state
1297 * @parent: parent cpuset
1298 * @xcpus: exclusive CPUs to be removed
1299 * Return: true if isolated_cpus modified, false otherwise
1300 *
1301 * Remote partition if parent == NULL
1302 */
1303 static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
1304 struct cpumask *xcpus)
1305 {
1306 bool isolcpus_updated;
1307
1308 WARN_ON_ONCE(old_prs < 0);
1309 lockdep_assert_held(&callback_lock);
1310 if (!parent)
1311 parent = &top_cpuset;
1312
1313 if (parent == &top_cpuset)
1314 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1315
1316 isolcpus_updated = (old_prs != parent->partition_root_state);
1317 if (isolcpus_updated)
1318 isolated_cpus_update(old_prs, parent->partition_root_state,
1319 xcpus);
1320
1321 cpumask_and(xcpus, xcpus, cpu_active_mask);
1322 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1323 return isolcpus_updated;
1324 }
1325
1326 static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
1327 {
1328 int ret;
1329
1330 lockdep_assert_cpus_held();
1331
1332 if (!isolcpus_updated)
1333 return;
1334
1335 ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1336 WARN_ON_ONCE(ret < 0);
1337 }
1338
1339 /**
1340 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1341 * @cpu: the CPU number to be checked
1342 * Return: true if CPU is used in an isolated partition, false otherwise
1343 */
1344 bool cpuset_cpu_is_isolated(int cpu)
1345 {
1346 return cpumask_test_cpu(cpu, isolated_cpus);
1347 }
1348 EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
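/*
 * Usage sketch (illustrative): housekeeping-aware callers can skip CPUs
 * that belong to an isolated partition, e.g.
 *
 *	for_each_online_cpu(cpu) {
 *		if (cpuset_cpu_is_isolated(cpu))
 *			continue;
 *		do_per_cpu_maintenance(cpu);
 *	}
 *
 * where do_per_cpu_maintenance() is a hypothetical helper, not a real API.
 */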
1349
1350 /*
1351 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1352 * @cs: cpuset
1353 * @xcpus: effective exclusive CPUs value to be set
1354 * @real_cs: the real cpuset (can be NULL)
1355 * Return: 0 if there is no sibling conflict, > 0 otherwise
1356 *
1357 * If exclusive_cpus isn't explicitly set or a real_cs is provided, we have to
1358 * scan the sibling cpusets and exclude their exclusive_cpus or effective_xcpus
1359 * as well. The provision of real_cs means that a cpumask is being changed and
1360 * the given cs is a trial one.
1361 */
1362 static int compute_effective_exclusive_cpumask(struct cpuset *cs,
1363 struct cpumask *xcpus,
1364 struct cpuset *real_cs)
1365 {
1366 struct cgroup_subsys_state *css;
1367 struct cpuset *parent = parent_cs(cs);
1368 struct cpuset *sibling;
1369 int retval = 0;
1370
1371 if (!xcpus)
1372 xcpus = cs->effective_xcpus;
1373
1374 cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
1375
1376 if (!real_cs) {
1377 if (!cpumask_empty(cs->exclusive_cpus))
1378 return 0;
1379 } else {
1380 cs = real_cs;
1381 }
1382
1383 /*
1384 * Exclude exclusive CPUs from siblings
1385 */
1386 rcu_read_lock();
1387 cpuset_for_each_child(sibling, css, parent) {
1388 if (sibling == cs)
1389 continue;
1390
1391 if (!cpumask_empty(sibling->exclusive_cpus) &&
1392 cpumask_intersects(xcpus, sibling->exclusive_cpus)) {
1393 cpumask_andnot(xcpus, xcpus, sibling->exclusive_cpus);
1394 retval++;
1395 continue;
1396 }
1397 if (!cpumask_empty(sibling->effective_xcpus) &&
1398 cpumask_intersects(xcpus, sibling->effective_xcpus)) {
1399 cpumask_andnot(xcpus, xcpus, sibling->effective_xcpus);
1400 retval++;
1401 }
1402 }
1403 rcu_read_unlock();
1404 return retval;
1405 }
1406
1407 static inline bool is_remote_partition(struct cpuset *cs)
1408 {
1409 return !list_empty(&cs->remote_sibling);
1410 }
1411
1412 static inline bool is_local_partition(struct cpuset *cs)
1413 {
1414 return is_partition_valid(cs) && !is_remote_partition(cs);
1415 }
1416
1417 /*
1418 * remote_partition_enable - Enable current cpuset as a remote partition root
1419 * @cs: the cpuset to update
1420 * @new_prs: new partition_root_state
1421 * @tmp: temporary masks
1422 * Return: 0 if successful, errcode if error
1423 *
1424 * Enable the current cpuset to become a remote partition root taking CPUs
1425 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1426 */
1427 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1428 struct tmpmasks *tmp)
1429 {
1430 bool isolcpus_updated;
1431
1432 /*
1433 * The user must have sysadmin privilege.
1434 */
1435 if (!capable(CAP_SYS_ADMIN))
1436 return PERR_ACCESS;
1437
1438 /*
1439 * The requested exclusive_cpus must not be allocated to other
1440 * partitions and it can't use up all the root's effective_cpus.
1441 *
1442 * Note that if there is any local partition root above it or
1443 * remote partition root underneath it, its exclusive_cpus must
1444 * have overlapped with subpartitions_cpus.
1445 */
1446 compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL);
1447 if (cpumask_empty(tmp->new_cpus) ||
1448 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
1449 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1450 return PERR_INVCPUS;
1451
1452 spin_lock_irq(&callback_lock);
1453 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1454 list_add(&cs->remote_sibling, &remote_children);
1455 cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1456 spin_unlock_irq(&callback_lock);
1457 update_unbound_workqueue_cpumask(isolcpus_updated);
1458 cpuset_force_rebuild();
1459 cs->prs_err = 0;
1460
1461 /*
1462 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1463 */
1464 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1465 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1466 return 0;
1467 }
1468
1469 /*
1470 * remote_partition_disable - Remove current cpuset from remote partition list
1471 * @cs: the cpuset to update
1472 * @tmp: temporary masks
1473 *
1474 * The effective_cpus is also updated.
1475 *
1476 * cpuset_mutex must be held by the caller.
1477 */
1478 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1479 {
1480 bool isolcpus_updated;
1481
1482 WARN_ON_ONCE(!is_remote_partition(cs));
1483 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1484
1485 spin_lock_irq(&callback_lock);
1486 list_del_init(&cs->remote_sibling);
1487 isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1488 NULL, cs->effective_xcpus);
1489 if (cs->prs_err)
1490 cs->partition_root_state = -cs->partition_root_state;
1491 else
1492 cs->partition_root_state = PRS_MEMBER;
1493
1494 /* effective_xcpus may need to be changed */
1495 compute_effective_exclusive_cpumask(cs, NULL, NULL);
1496 reset_partition_data(cs);
1497 spin_unlock_irq(&callback_lock);
1498 update_unbound_workqueue_cpumask(isolcpus_updated);
1499 cpuset_force_rebuild();
1500
1501 /*
1502 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1503 */
1504 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1505 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1506 }
1507
1508 /*
1509 * remote_cpus_update - cpus_exclusive change of remote partition
1510 * @cs: the cpuset to be updated
1511 * @xcpus: the new exclusive_cpus mask, if non-NULL
1512 * @excpus: the new effective_xcpus mask
1513 * @tmp: temporary masks
1514 *
1515 * top_cpuset and subpartitions_cpus will be updated or partition can be
1516 * invalidated.
1517 */
1518 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1519 struct cpumask *excpus, struct tmpmasks *tmp)
1520 {
1521 bool adding, deleting;
1522 int prs = cs->partition_root_state;
1523 int isolcpus_updated = 0;
1524
1525 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1526 return;
1527
1528 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1529
1530 if (cpumask_empty(excpus)) {
1531 cs->prs_err = PERR_CPUSEMPTY;
1532 goto invalidate;
1533 }
1534
1535 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1536 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1537
1538 /*
1539 * Adding remote CPUs is only allowed if those CPUs are
1540 * not allocated to other partitions and there are effective_cpus
1541 * left in the top cpuset.
1542 */
1543 if (adding) {
1544 if (!capable(CAP_SYS_ADMIN))
1545 cs->prs_err = PERR_ACCESS;
1546 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1547 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1548 cs->prs_err = PERR_NOCPUS;
1549 if (cs->prs_err)
1550 goto invalidate;
1551 }
1552
1553 spin_lock_irq(&callback_lock);
1554 if (adding)
1555 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
1556 if (deleting)
1557 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
1558 /*
1559 * Need to update effective_xcpus and exclusive_cpus now as
1560 * update_sibling_cpumasks() below may iterate back to the same cs.
1561 */
1562 cpumask_copy(cs->effective_xcpus, excpus);
1563 if (xcpus)
1564 cpumask_copy(cs->exclusive_cpus, xcpus);
1565 spin_unlock_irq(&callback_lock);
1566 update_unbound_workqueue_cpumask(isolcpus_updated);
1567 if (adding || deleting)
1568 cpuset_force_rebuild();
1569
1570 /*
1571 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1572 */
1573 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1574 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1575 return;
1576
1577 invalidate:
1578 remote_partition_disable(cs, tmp);
1579 }
1580
1581 /*
1582 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1583 * @prstate: partition root state to be checked
1584 * @new_cpus: cpu mask
1585 * Return: true if there is conflict, false otherwise
1586 *
1587 * CPUs outside of boot_hk_cpus, if defined, can only be used in an
1588 * isolated partition.
1589 */
1590 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1591 {
1592 if (!have_boot_isolcpus)
1593 return false;
1594
1595 if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
1596 return true;
1597
1598 return false;
1599 }
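/*
 * Illustrative example (assuming an "isolcpus=2,3" boot parameter): CPUs 2-3
 * are then outside boot_hk_cpus, so a cpuset that includes them can only be
 * turned into an "isolated" partition; trying to make it a "root" partition
 * fails with PERR_HKEEPING.
 */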
1600
1601 /**
1602 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1603 * @cs: The cpuset that requests change in partition root state
1604 * @cmd: Partition root state change command
1605 * @newmask: Optional new cpumask for partcmd_update
1606 * @tmp: Temporary addmask and delmask
1607 * Return: 0 or a partition root state error code
1608 *
1609 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1610 * root to a partition root. The effective_xcpus (cpus_allowed if
1611 * effective_xcpus not set) mask of the given cpuset will be taken away from
1612 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1613 * in effective_xcpus can be granted or an error code will be returned.
1614 *
1615 * For partcmd_disable, the cpuset is being transformed from a partition
1616 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1617 * given back to parent's effective_cpus. 0 will always be returned.
1618 *
1619 * For partcmd_update, if the optional newmask is specified, the cpu list is
1620 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1621 * assumed to remain the same. The cpuset should either be a valid or invalid
1622 * partition root. The partition root state may change from valid to invalid
1623 * or vice versa. An error code will be returned if transitioning from
1624 * invalid to valid violates the exclusivity rule.
1625 *
1626 * For partcmd_invalidate, the current partition will be made invalid.
1627 *
1628 * The partcmd_enable* and partcmd_disable commands are used by
1629 * update_prstate(). An error code may be returned and the caller will check
1630 * for error.
1631 *
1632 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1633 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1634 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1635 * check for error and so partition_root_state and prs_err will be updated
1636 * directly.
1637 */
1638 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1639 struct cpumask *newmask,
1640 struct tmpmasks *tmp)
1641 {
1642 struct cpuset *parent = parent_cs(cs);
1643 int adding; /* Adding cpus to parent's effective_cpus */
1644 int deleting; /* Deleting cpus from parent's effective_cpus */
1645 int old_prs, new_prs;
1646 int part_error = PERR_NONE; /* Partition error? */
1647 int subparts_delta = 0;
1648 int isolcpus_updated = 0;
1649 struct cpumask *xcpus = user_xcpus(cs);
1650 bool nocpu;
1651
1652 lockdep_assert_held(&cpuset_mutex);
1653 WARN_ON_ONCE(is_remote_partition(cs));
1654
1655 /*
1656 * new_prs will only be changed for the partcmd_update and
1657 * partcmd_invalidate commands.
1658 */
1659 adding = deleting = false;
1660 old_prs = new_prs = cs->partition_root_state;
1661
1662 if (cmd == partcmd_invalidate) {
1663 if (is_prs_invalid(old_prs))
1664 return 0;
1665
1666 /*
1667 * Make the current partition invalid.
1668 */
1669 if (is_partition_valid(parent))
1670 adding = cpumask_and(tmp->addmask,
1671 xcpus, parent->effective_xcpus);
1672 if (old_prs > 0) {
1673 new_prs = -old_prs;
1674 subparts_delta--;
1675 }
1676 goto write_error;
1677 }
1678
1679 /*
1680 * The parent must be a partition root.
1681 * The new cpumask, if present, or the current cpus_allowed must
1682 * not be empty.
1683 */
1684 if (!is_partition_valid(parent)) {
1685 return is_partition_invalid(parent)
1686 ? PERR_INVPARENT : PERR_NOTPART;
1687 }
1688 if (!newmask && xcpus_empty(cs))
1689 return PERR_CPUSEMPTY;
1690
1691 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1692
1693 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1694 /*
1695 * Need to call compute_effective_exclusive_cpumask() in case
1696 * exclusive_cpus not set. Sibling conflict should only happen
1697 * if exclusive_cpus isn't set.
1698 */
1699 xcpus = tmp->new_cpus;
1700 if (compute_effective_exclusive_cpumask(cs, xcpus, NULL))
1701 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1702
1703 /*
1704 * Enabling partition root is not allowed if its
1705 * effective_xcpus is empty.
1706 */
1707 if (cpumask_empty(xcpus))
1708 return PERR_INVCPUS;
1709
1710 if (prstate_housekeeping_conflict(new_prs, xcpus))
1711 return PERR_HKEEPING;
1712
1713 /*
1714 * A parent can be left with no CPU as long as there is no
1715 * task directly associated with the parent partition.
1716 */
1717 if (nocpu)
1718 return PERR_NOCPUS;
1719
1720 deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus);
1721 if (deleting)
1722 subparts_delta++;
1723 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1724 } else if (cmd == partcmd_disable) {
1725 /*
1726 * May need to add cpus back to parent's effective_cpus
1727 * (and maybe removed from subpartitions_cpus/isolated_cpus)
1728 * for valid partition root. xcpus may contain CPUs that
1729 * shouldn't be removed from the two global cpumasks.
1730 */
1731 if (is_partition_valid(cs)) {
1732 cpumask_copy(tmp->addmask, cs->effective_xcpus);
1733 adding = true;
1734 subparts_delta--;
1735 }
1736 new_prs = PRS_MEMBER;
1737 } else if (newmask) {
1738 /*
1739 * Empty cpumask is not allowed
1740 */
1741 if (cpumask_empty(newmask)) {
1742 part_error = PERR_CPUSEMPTY;
1743 goto write_error;
1744 }
1745
1746 /* Check newmask again, whether cpus are available for parent/cs */
1747 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1748
1749 /*
1750 * partcmd_update with newmask:
1751 *
1752 * Compute add/delete mask to/from effective_cpus
1753 *
1754 * For valid partition:
1755 * addmask = exclusive_cpus & ~newmask
1756 * & parent->effective_xcpus
1757 * delmask = newmask & ~exclusive_cpus
1758 * & parent->effective_xcpus
1759 *
1760 * For invalid partition:
1761 * delmask = newmask & parent->effective_xcpus
1762 */
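	/*
	 * Worked example (hypothetical masks, for illustration only): for a
	 * valid partition with xcpus = 2-5, newmask = 4-7 and
	 * parent->effective_xcpus = 0-7:
	 *   addmask = 2-5 & ~(4-7) & 0-7 = 2-3  (returned to the parent)
	 *   delmask = 4-7 & ~(2-5) & 0-7 = 6-7  (newly taken from the parent)
	 */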
1763 if (is_prs_invalid(old_prs)) {
1764 adding = false;
1765 deleting = cpumask_and(tmp->delmask,
1766 newmask, parent->effective_xcpus);
1767 } else {
1768 cpumask_andnot(tmp->addmask, xcpus, newmask);
1769 adding = cpumask_and(tmp->addmask, tmp->addmask,
1770 parent->effective_xcpus);
1771
1772 cpumask_andnot(tmp->delmask, newmask, xcpus);
1773 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1774 parent->effective_xcpus);
1775 }
1776 /*
1777 * Make partition invalid if parent's effective_cpus could
1778 * become empty and there are tasks in the parent.
1779 */
1780 if (nocpu && (!adding ||
1781 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1782 part_error = PERR_NOCPUS;
1783 deleting = false;
1784 adding = cpumask_and(tmp->addmask,
1785 xcpus, parent->effective_xcpus);
1786 }
1787 } else {
1788 /*
1789 * partcmd_update w/o newmask
1790 *
1791 * delmask = effective_xcpus & parent->effective_cpus
1792 *
1793 * This can be called from:
1794 * 1) update_cpumasks_hier()
1795 * 2) cpuset_hotplug_update_tasks()
1796 *
1797 * Check to see if it can be transitioned from valid to
1798 * invalid partition or vice versa.
1799 *
1800 * A partition error happens when parent has tasks and all
1801 * its effective CPUs will have to be distributed out.
1802 */
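	/*
	 * Worked example (hypothetical masks, for illustration only): if an
	 * invalid partition with effective_xcpus = 4-7 is being revalidated
	 * and parent->effective_cpus = 0-7, then
	 *   delmask = 4-7 & 0-7 = 4-7
	 * i.e. those CPUs are taken back out of the parent's effective_cpus.
	 */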
1803 WARN_ON_ONCE(!is_partition_valid(parent));
1804 if (nocpu) {
1805 part_error = PERR_NOCPUS;
1806 if (is_partition_valid(cs))
1807 adding = cpumask_and(tmp->addmask,
1808 xcpus, parent->effective_xcpus);
1809 } else if (is_partition_invalid(cs) &&
1810 cpumask_subset(xcpus, parent->effective_xcpus)) {
1811 struct cgroup_subsys_state *css;
1812 struct cpuset *child;
1813 bool exclusive = true;
1814
1815 /*
1816 * Converting an invalid partition to a valid one has to
1817 * pass the cpu exclusivity test.
1818 */
1819 rcu_read_lock();
1820 cpuset_for_each_child(child, css, parent) {
1821 if (child == cs)
1822 continue;
1823 if (!cpusets_are_exclusive(cs, child)) {
1824 exclusive = false;
1825 break;
1826 }
1827 }
1828 rcu_read_unlock();
1829 if (exclusive)
1830 deleting = cpumask_and(tmp->delmask,
1831 xcpus, parent->effective_cpus);
1832 else
1833 part_error = PERR_NOTEXCL;
1834 }
1835 }
1836
1837 write_error:
1838 if (part_error)
1839 WRITE_ONCE(cs->prs_err, part_error);
1840
1841 if (cmd == partcmd_update) {
1842 /*
1843 * Check for possible transition between valid and invalid
1844 * partition root.
1845 */
1846 switch (cs->partition_root_state) {
1847 case PRS_ROOT:
1848 case PRS_ISOLATED:
1849 if (part_error) {
1850 new_prs = -old_prs;
1851 subparts_delta--;
1852 }
1853 break;
1854 case PRS_INVALID_ROOT:
1855 case PRS_INVALID_ISOLATED:
1856 if (!part_error) {
1857 new_prs = -old_prs;
1858 subparts_delta++;
1859 }
1860 break;
1861 }
1862 }
1863
1864 if (!adding && !deleting && (new_prs == old_prs))
1865 return 0;
1866
1867 /*
1868 * Transitioning from invalid to valid or vice versa may require
1869 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1870 * validate_change() has already been successfully called and
1871 * CPU lists in cs haven't been updated yet. So defer it to later.
1872 */
1873 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1874 int err = update_partition_exclusive_flag(cs, new_prs);
1875
1876 if (err)
1877 return err;
1878 }
1879
1880 /*
1881 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1882 * only).
1883 *
1884 * Newly added CPUs will be removed from effective_cpus and
1885 * newly deleted ones will be added back to effective_cpus.
1886 */
1887 spin_lock_irq(&callback_lock);
1888 if (old_prs != new_prs) {
1889 cs->partition_root_state = new_prs;
1890 if (new_prs <= 0)
1891 cs->nr_subparts = 0;
1892 }
1893 /*
1894 * Adding to parent's effective_cpus means deleting CPUs from cs
1895 * and vice versa.
1896 */
1897 if (adding)
1898 isolcpus_updated += partition_xcpus_del(old_prs, parent,
1899 tmp->addmask);
1900 if (deleting)
1901 isolcpus_updated += partition_xcpus_add(new_prs, parent,
1902 tmp->delmask);
1903
1904 if (is_partition_valid(parent)) {
1905 parent->nr_subparts += subparts_delta;
1906 WARN_ON_ONCE(parent->nr_subparts < 0);
1907 }
1908 spin_unlock_irq(&callback_lock);
1909 update_unbound_workqueue_cpumask(isolcpus_updated);
1910
1911 if ((old_prs != new_prs) && (cmd == partcmd_update))
1912 update_partition_exclusive_flag(cs, new_prs);
1913
1914 if (adding || deleting) {
1915 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1916 update_sibling_cpumasks(parent, cs, tmp);
1917 }
1918
1919 /*
1920 * For partcmd_update without newmask, it is being called from
1921 * cpuset_handle_hotplug(). Update the load balance flag and
1922 * scheduling domain accordingly.
1923 */
1924 if ((cmd == partcmd_update) && !newmask)
1925 update_partition_sd_lb(cs, old_prs);
1926
1927 notify_partition_change(cs, old_prs);
1928 return 0;
1929 }
1930
1931 /**
1932 * compute_partition_effective_cpumask - compute effective_cpus for partition
1933 * @cs: partition root cpuset
1934 * @new_ecpus: previously computed effective_cpus to be updated
1935 *
1936 * Compute the effective_cpus of a partition root by scanning effective_xcpus
1937 * of child partition roots and excluding their effective_xcpus.
1938 *
1939 * This has the side effect of invalidating valid child partition roots,
1940 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
1941 * or update_cpumasks_hier() where parent and children are modified
1942 * successively, we don't need to call update_parent_effective_cpumask()
1943 * and the child's effective_cpus will be updated in later iterations.
1944 *
1945 * Note that rcu_read_lock() is assumed to be held.
1946 */
1947 static void compute_partition_effective_cpumask(struct cpuset *cs,
1948 struct cpumask *new_ecpus)
1949 {
1950 struct cgroup_subsys_state *css;
1951 struct cpuset *child;
1952 bool populated = partition_is_populated(cs, NULL);
1953
1954 /*
1955 * Check child partition roots to see if they should be
1956 * invalidated when
1957 * 1) child effective_xcpus not a subset of new
1958 * exclusive_cpus
1959 * 2) All the effective_cpus will be used up and cs
1960 * has tasks
1961 */
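	/*
	 * Worked example (hypothetical masks, for illustration only): with
	 * cs->effective_xcpus = 0-7 and cs populated, a child partition with
	 * effective_xcpus = 0-3 passes both checks and new_ecpus shrinks to
	 * 4-7; a second child with effective_xcpus = 4-7 would then consume
	 * all remaining CPUs and is invalidated with PERR_NOCPUS instead.
	 */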
1962 compute_effective_exclusive_cpumask(cs, new_ecpus, NULL);
1963 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
1964
1965 rcu_read_lock();
1966 cpuset_for_each_child(child, css, cs) {
1967 if (!is_partition_valid(child))
1968 continue;
1969
1970 /*
1971 * There shouldn't be a remote partition underneath another
1972 * partition root.
1973 */
1974 WARN_ON_ONCE(is_remote_partition(child));
1975 child->prs_err = 0;
1976 if (!cpumask_subset(child->effective_xcpus,
1977 cs->effective_xcpus))
1978 child->prs_err = PERR_INVCPUS;
1979 else if (populated &&
1980 cpumask_subset(new_ecpus, child->effective_xcpus))
1981 child->prs_err = PERR_NOCPUS;
1982
1983 if (child->prs_err) {
1984 int old_prs = child->partition_root_state;
1985
1986 /*
1987 * Invalidate child partition
1988 */
1989 spin_lock_irq(&callback_lock);
1990 make_partition_invalid(child);
1991 cs->nr_subparts--;
1992 child->nr_subparts = 0;
1993 spin_unlock_irq(&callback_lock);
1994 notify_partition_change(child, old_prs);
1995 continue;
1996 }
1997 cpumask_andnot(new_ecpus, new_ecpus,
1998 child->effective_xcpus);
1999 }
2000 rcu_read_unlock();
2001 }
2002
2003 /*
2004 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2005 * @cs: the cpuset to consider
2006 * @tmp: temp variables for calculating effective_cpus & partition setup
2007 * @force: don't skip any descendant cpusets if set
2008 *
2009 * When configured cpumask is changed, the effective cpumasks of this cpuset
2010 * and all its descendants need to be updated.
2011 *
2012 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2013 *
2014 * Called with cpuset_mutex held
2015 */
2016 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2017 bool force)
2018 {
2019 struct cpuset *cp;
2020 struct cgroup_subsys_state *pos_css;
2021 bool need_rebuild_sched_domains = false;
2022 int old_prs, new_prs;
2023
2024 rcu_read_lock();
2025 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2026 struct cpuset *parent = parent_cs(cp);
2027 bool remote = is_remote_partition(cp);
2028 bool update_parent = false;
2029
2030 old_prs = new_prs = cp->partition_root_state;
2031
2032 /*
2033 * For child remote partition root (!= cs), we need to call
2034 * remote_cpus_update() if effective_xcpus will be changed.
2035 * Otherwise, we can skip the whole subtree.
2036 *
2037 * remote_cpus_update() will reuse tmp->new_cpus only after
2038 * its current value has been processed.
2039 */
2040 if (remote && (cp != cs)) {
2041 compute_effective_exclusive_cpumask(cp, tmp->new_cpus, NULL);
2042 if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2043 pos_css = css_rightmost_descendant(pos_css);
2044 continue;
2045 }
2046 rcu_read_unlock();
2047 remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2048 rcu_read_lock();
2049
2050 /* Remote partition may be invalidated */
2051 new_prs = cp->partition_root_state;
2052 remote = (new_prs == old_prs);
2053 }
2054
2055 if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2056 compute_partition_effective_cpumask(cp, tmp->new_cpus);
2057 else
2058 compute_effective_cpumask(tmp->new_cpus, cp, parent);
2059
2060 if (remote)
2061 goto get_css; /* Ready to update cpuset data */
2062
2063 /*
2064 * A partition with no effective_cpus is allowed as long as
2065 * there is no task associated with it. Call
2066 * update_parent_effective_cpumask() to check it.
2067 */
2068 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2069 update_parent = true;
2070 goto update_parent_effective;
2071 }
2072
2073 /*
2074 * If it becomes empty, inherit the effective mask of the
2075 * parent, which is guaranteed to have some CPUs unless
2076 * it is a partition root that has explicitly distributed
2077 * out all its CPUs.
2078 */
2079 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2080 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2081
2082 /*
2083 * Skip the whole subtree if
2084 * 1) the cpumask remains the same,
2085 * 2) has no partition root state,
2086 * 3) force flag not set, and
2087 * 4) for v2 load balance state same as its parent.
2088 */
2089 if (!cp->partition_root_state && !force &&
2090 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2091 (!cpuset_v2() ||
2092 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2093 pos_css = css_rightmost_descendant(pos_css);
2094 continue;
2095 }
2096
2097 update_parent_effective:
2098 /*
2099 * update_parent_effective_cpumask() should have been called
2100 * for cs already in update_cpumask(). We should also call
2101 * cpuset_update_tasks_cpumask() again for tasks in the parent
2102 * cpuset if the parent's effective_cpus changes.
2103 */
2104 if ((cp != cs) && old_prs) {
2105 switch (parent->partition_root_state) {
2106 case PRS_ROOT:
2107 case PRS_ISOLATED:
2108 update_parent = true;
2109 break;
2110
2111 default:
2112 /*
2113 * When parent is not a partition root or is
2114 * invalid, child partition roots become
2115 * invalid too.
2116 */
2117 if (is_partition_valid(cp))
2118 new_prs = -cp->partition_root_state;
2119 WRITE_ONCE(cp->prs_err,
2120 is_partition_invalid(parent)
2121 ? PERR_INVPARENT : PERR_NOTPART);
2122 break;
2123 }
2124 }
2125 get_css:
2126 if (!css_tryget_online(&cp->css))
2127 continue;
2128 rcu_read_unlock();
2129
2130 if (update_parent) {
2131 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2132 /*
2133 * The cpuset partition_root_state may become
2134 * invalid. Capture it.
2135 */
2136 new_prs = cp->partition_root_state;
2137 }
2138
2139 spin_lock_irq(&callback_lock);
2140 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2141 cp->partition_root_state = new_prs;
2142 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs))
2143 compute_effective_exclusive_cpumask(cp, NULL, NULL);
2144
2145 /*
2146 * Make sure effective_xcpus is properly set for a valid
2147 * partition root.
2148 */
2149 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2150 cpumask_and(cp->effective_xcpus,
2151 cp->cpus_allowed, parent->effective_xcpus);
2152 else if (new_prs < 0)
2153 reset_partition_data(cp);
2154 spin_unlock_irq(&callback_lock);
2155
2156 notify_partition_change(cp, old_prs);
2157
2158 WARN_ON(!is_in_v2_mode() &&
2159 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2160
2161 cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2162
2163 /*
2164 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2165 * from parent if current cpuset isn't a valid partition root
2166 * and their load balance states differ.
2167 */
2168 if (cpuset_v2() && !is_partition_valid(cp) &&
2169 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2170 if (is_sched_load_balance(parent))
2171 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2172 else
2173 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2174 }
2175
2176 /*
2177 * On legacy hierarchy, if the effective cpumask of any non-
2178 * empty cpuset is changed, we need to rebuild sched domains.
2179 * On default hierarchy, the cpuset needs to be a partition
2180 * root as well.
2181 */
2182 if (!cpumask_empty(cp->cpus_allowed) &&
2183 is_sched_load_balance(cp) &&
2184 (!cpuset_v2() || is_partition_valid(cp)))
2185 need_rebuild_sched_domains = true;
2186
2187 rcu_read_lock();
2188 css_put(&cp->css);
2189 }
2190 rcu_read_unlock();
2191
2192 if (need_rebuild_sched_domains)
2193 cpuset_force_rebuild();
2194 }
2195
2196 /**
2197 * update_sibling_cpumasks - Update siblings cpumasks
2198 * @parent: Parent cpuset
2199 * @cs: Current cpuset
2200 * @tmp: Temp variables
2201 */
2202 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2203 struct tmpmasks *tmp)
2204 {
2205 struct cpuset *sibling;
2206 struct cgroup_subsys_state *pos_css;
2207
2208 lockdep_assert_held(&cpuset_mutex);
2209
2210 /*
2211 * Check all its siblings and call update_cpumasks_hier()
2212 * if their effective_cpus will need to be changed.
2213 *
2214 * It is possible a change in parent's effective_cpus
2215 * due to a change in a child partition's effective_xcpus will impact
2216 * its siblings even if they do not inherit parent's effective_cpus
2217 * directly.
2218 *
2219 * The update_cpumasks_hier() function may sleep. So we have to
2220 * release the RCU read lock before calling it.
2221 */
2222 rcu_read_lock();
2223 cpuset_for_each_child(sibling, pos_css, parent) {
2224 if (sibling == cs)
2225 continue;
2226 if (!is_partition_valid(sibling)) {
2227 compute_effective_cpumask(tmp->new_cpus, sibling,
2228 parent);
2229 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2230 continue;
2231 } else if (is_remote_partition(sibling)) {
2232 /*
2233 * Change in a sibling cpuset won't affect a remote
2234 * partition root.
2235 */
2236 continue;
2237 }
2238
2239 if (!css_tryget_online(&sibling->css))
2240 continue;
2241
2242 rcu_read_unlock();
2243 update_cpumasks_hier(sibling, tmp, false);
2244 rcu_read_lock();
2245 css_put(&sibling->css);
2246 }
2247 rcu_read_unlock();
2248 }
2249
2250 /**
2251 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2252 * @cs: the cpuset to consider
2253 * @trialcs: trial cpuset
2254 * @buf: buffer of cpu numbers written to this cpuset
2255 */
2256 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2257 const char *buf)
2258 {
2259 int retval;
2260 struct tmpmasks tmp;
2261 struct cpuset *parent = parent_cs(cs);
2262 bool invalidate = false;
2263 bool force = false;
2264 int old_prs = cs->partition_root_state;
2265
2266 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
2267 if (cs == &top_cpuset)
2268 return -EACCES;
2269
2270 /*
2271 * An empty cpus_allowed is ok only if the cpuset has no tasks.
2272 * Since cpulist_parse() fails on an empty mask, we special case
2273 * that parsing. The validate_change() call ensures that cpusets
2274 * with tasks have cpus.
2275 */
2276 if (!*buf) {
2277 cpumask_clear(trialcs->cpus_allowed);
2278 if (cpumask_empty(trialcs->exclusive_cpus))
2279 cpumask_clear(trialcs->effective_xcpus);
2280 } else {
2281 retval = cpulist_parse(buf, trialcs->cpus_allowed);
2282 if (retval < 0)
2283 return retval;
2284
2285 if (!cpumask_subset(trialcs->cpus_allowed,
2286 top_cpuset.cpus_allowed))
2287 return -EINVAL;
2288
2289 /*
2290 * When exclusive_cpus isn't explicitly set, it is constrained
2291 * by cpus_allowed and parent's effective_xcpus. Otherwise,
2292 * trialcs->effective_xcpus is used as a temporary cpumask
2293 * for checking validity of the partition root.
2294 */
2295 trialcs->partition_root_state = PRS_MEMBER;
2296 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2297 compute_effective_exclusive_cpumask(trialcs, NULL, cs);
2298 }
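
	/*
	 * For reference, the buffer is parsed with cpulist_parse(), so the
	 * usual cpu-list syntax applies; e.g. a (hypothetical) write of
	 * "0-3,8" sets CPUs 0, 1, 2, 3 and 8 in trialcs->cpus_allowed.
	 */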
2299
2300 /* Nothing to do if the cpus didn't change */
2301 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2302 return 0;
2303
2304 if (alloc_cpumasks(NULL, &tmp))
2305 return -ENOMEM;
2306
2307 if (old_prs) {
2308 if (is_partition_valid(cs) &&
2309 cpumask_empty(trialcs->effective_xcpus)) {
2310 invalidate = true;
2311 cs->prs_err = PERR_INVCPUS;
2312 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2313 invalidate = true;
2314 cs->prs_err = PERR_HKEEPING;
2315 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2316 invalidate = true;
2317 cs->prs_err = PERR_NOCPUS;
2318 }
2319 }
2320
2321 /*
2322 * Check all the descendants in update_cpumasks_hier() if
2323 * effective_xcpus is to be changed.
2324 */
2325 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2326
2327 retval = validate_change(cs, trialcs);
2328
2329 if ((retval == -EINVAL) && cpuset_v2()) {
2330 struct cgroup_subsys_state *css;
2331 struct cpuset *cp;
2332
2333 /*
2334 * The -EINVAL error code indicates that partition sibling
2335 * CPU exclusivity rule has been violated. We still allow
2336 * the cpumask change to proceed while invalidating the
2337 * partition. However, any conflicting sibling partitions
2338 * have to be marked as invalid too.
2339 */
2340 invalidate = true;
2341 rcu_read_lock();
2342 cpuset_for_each_child(cp, css, parent) {
2343 struct cpumask *xcpus = user_xcpus(trialcs);
2344
2345 if (is_partition_valid(cp) &&
2346 cpumask_intersects(xcpus, cp->effective_xcpus)) {
2347 rcu_read_unlock();
2348 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
2349 rcu_read_lock();
2350 }
2351 }
2352 rcu_read_unlock();
2353 retval = 0;
2354 }
2355
2356 if (retval < 0)
2357 goto out_free;
2358
2359 if (is_partition_valid(cs) ||
2360 (is_partition_invalid(cs) && !invalidate)) {
2361 struct cpumask *xcpus = trialcs->effective_xcpus;
2362
2363 if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2364 xcpus = trialcs->cpus_allowed;
2365
2366 /*
2367 * Call remote_cpus_update() to handle valid remote partition
2368 */
2369 if (is_remote_partition(cs))
2370 remote_cpus_update(cs, NULL, xcpus, &tmp);
2371 else if (invalidate)
2372 update_parent_effective_cpumask(cs, partcmd_invalidate,
2373 NULL, &tmp);
2374 else
2375 update_parent_effective_cpumask(cs, partcmd_update,
2376 xcpus, &tmp);
2377 }
2378
2379 spin_lock_irq(&callback_lock);
2380 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2381 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2382 if ((old_prs > 0) && !is_partition_valid(cs))
2383 reset_partition_data(cs);
2384 spin_unlock_irq(&callback_lock);
2385
2386 /* effective_cpus/effective_xcpus will be updated here */
2387 update_cpumasks_hier(cs, &tmp, force);
2388
2389 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2390 if (cs->partition_root_state)
2391 update_partition_sd_lb(cs, old_prs);
2392 out_free:
2393 free_cpumasks(NULL, &tmp);
2394 return retval;
2395 }
2396
2397 /**
2398 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2399 * @cs: the cpuset to consider
2400 * @trialcs: trial cpuset
2401 * @buf: buffer of cpu numbers written to this cpuset
2402 *
2403 * The tasks' cpumask will be updated if cs is a valid partition root.
2404 */
2405 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2406 const char *buf)
2407 {
2408 int retval;
2409 struct tmpmasks tmp;
2410 struct cpuset *parent = parent_cs(cs);
2411 bool invalidate = false;
2412 bool force = false;
2413 int old_prs = cs->partition_root_state;
2414
2415 if (!*buf) {
2416 cpumask_clear(trialcs->exclusive_cpus);
2417 cpumask_clear(trialcs->effective_xcpus);
2418 } else {
2419 retval = cpulist_parse(buf, trialcs->exclusive_cpus);
2420 if (retval < 0)
2421 return retval;
2422 }
2423
2424 /* Nothing to do if the CPUs didn't change */
2425 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2426 return 0;
2427
2428 if (*buf) {
2429 trialcs->partition_root_state = PRS_MEMBER;
2430 /*
2431 * Reject the change if the exclusive CPUs conflict with
2432 * those of the siblings.
2433 */
2434 if (compute_effective_exclusive_cpumask(trialcs, NULL, cs))
2435 return -EINVAL;
2436 }
2437
2438 /*
2439 * Check all the descendants in update_cpumasks_hier() if
2440 * effective_xcpus is to be changed.
2441 */
2442 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2443
2444 retval = validate_change(cs, trialcs);
2445 if (retval)
2446 return retval;
2447
2448 if (alloc_cpumasks(NULL, &tmp))
2449 return -ENOMEM;
2450
2451 if (old_prs) {
2452 if (cpumask_empty(trialcs->effective_xcpus)) {
2453 invalidate = true;
2454 cs->prs_err = PERR_INVCPUS;
2455 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2456 invalidate = true;
2457 cs->prs_err = PERR_HKEEPING;
2458 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2459 invalidate = true;
2460 cs->prs_err = PERR_NOCPUS;
2461 }
2462
2463 if (is_remote_partition(cs)) {
2464 if (invalidate)
2465 remote_partition_disable(cs, &tmp);
2466 else
2467 remote_cpus_update(cs, trialcs->exclusive_cpus,
2468 trialcs->effective_xcpus, &tmp);
2469 } else if (invalidate) {
2470 update_parent_effective_cpumask(cs, partcmd_invalidate,
2471 NULL, &tmp);
2472 } else {
2473 update_parent_effective_cpumask(cs, partcmd_update,
2474 trialcs->effective_xcpus, &tmp);
2475 }
2476 }
2477 spin_lock_irq(&callback_lock);
2478 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2479 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2480 if ((old_prs > 0) && !is_partition_valid(cs))
2481 reset_partition_data(cs);
2482 spin_unlock_irq(&callback_lock);
2483
2484 /*
2485 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2486 * of the subtree when it is a valid partition root or effective_xcpus
2487 * is updated.
2488 */
2489 if (is_partition_valid(cs) || force)
2490 update_cpumasks_hier(cs, &tmp, force);
2491
2492 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2493 if (cs->partition_root_state)
2494 update_partition_sd_lb(cs, old_prs);
2495
2496 free_cpumasks(NULL, &tmp);
2497 return 0;
2498 }
2499
2500 /*
2501 * Migrate memory region from one set of nodes to another. This is
2502 * performed asynchronously as it can be called from process migration path
2503 * holding locks involved in process management. All mm migrations are
2504 * performed in the queued order and can be waited for by flushing
2505 * cpuset_migrate_mm_wq.
2506 */
2507
2508 struct cpuset_migrate_mm_work {
2509 struct work_struct work;
2510 struct mm_struct *mm;
2511 nodemask_t from;
2512 nodemask_t to;
2513 };
2514
2515 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2516 {
2517 struct cpuset_migrate_mm_work *mwork =
2518 container_of(work, struct cpuset_migrate_mm_work, work);
2519
2520 /* on a wq worker, no need to worry about %current's mems_allowed */
2521 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2522 mmput(mwork->mm);
2523 kfree(mwork);
2524 }
2525
2526 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2527 const nodemask_t *to)
2528 {
2529 struct cpuset_migrate_mm_work *mwork;
2530
2531 if (nodes_equal(*from, *to)) {
2532 mmput(mm);
2533 return;
2534 }
2535
2536 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2537 if (mwork) {
2538 mwork->mm = mm;
2539 mwork->from = *from;
2540 mwork->to = *to;
2541 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2542 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2543 } else {
2544 mmput(mm);
2545 }
2546 }
2547
2548 static void cpuset_post_attach(void)
2549 {
2550 flush_workqueue(cpuset_migrate_mm_wq);
2551 }
2552
2553 /*
2554 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2555 * @tsk: the task to change
2556 * @newmems: new nodes that the task will be set
2557 *
2558 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2559 * and to rebind the task's mempolicy, if any. If the task is allocating in
2560 * parallel, it might temporarily see an empty intersection, which results in
2561 * a seqlock check and retry before OOM or allocation failure.
2562 */
2563 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2564 nodemask_t *newmems)
2565 {
2566 task_lock(tsk);
2567
2568 local_irq_disable();
2569 write_seqcount_begin(&tsk->mems_allowed_seq);
2570
2571 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2572 mpol_rebind_task(tsk, newmems);
2573 tsk->mems_allowed = *newmems;
2574
2575 write_seqcount_end(&tsk->mems_allowed_seq);
2576 local_irq_enable();
2577
2578 task_unlock(tsk);
2579 }
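
/*
 * Reader-side sketch (simplified from actual callers such as the page
 * allocator) showing how the seqcount written above is consumed:
 *
 *	unsigned int seq;
 *	nodemask_t nodes;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		nodes = current->mems_allowed;
 *		// ... pick a node from 'nodes' ...
 *	} while (read_mems_allowed_retry(seq));
 */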
2580
2581 static void *cpuset_being_rebound;
2582
2583 /**
2584 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2585 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2586 *
2587 * Iterate through each task of @cs updating its mems_allowed to the
2588 * effective cpuset's. As this function is called with cpuset_mutex held,
2589 * cpuset membership stays stable.
2590 */
2591 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2592 {
2593 static nodemask_t newmems; /* protected by cpuset_mutex */
2594 struct css_task_iter it;
2595 struct task_struct *task;
2596
2597 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2598
2599 guarantee_online_mems(cs, &newmems);
2600
2601 /*
2602 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2603 * take while holding tasklist_lock. Forks can happen - the
2604 * mpol_dup() cpuset_being_rebound check will catch such forks,
2605 * and rebind their vma mempolicies too. Because we still hold
2606 * the global cpuset_mutex, we know that no other rebind effort
2607 * will be contending for the global variable cpuset_being_rebound.
2608 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2609 * is idempotent. Also migrate pages in each mm to new nodes.
2610 */
2611 css_task_iter_start(&cs->css, 0, &it);
2612 while ((task = css_task_iter_next(&it))) {
2613 struct mm_struct *mm;
2614 bool migrate;
2615
2616 cpuset_change_task_nodemask(task, &newmems);
2617
2618 mm = get_task_mm(task);
2619 if (!mm)
2620 continue;
2621
2622 migrate = is_memory_migrate(cs);
2623
2624 mpol_rebind_mm(mm, &cs->mems_allowed);
2625 if (migrate)
2626 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2627 else
2628 mmput(mm);
2629 }
2630 css_task_iter_end(&it);
2631
2632 /*
2633 * All the tasks' nodemasks have been updated, update
2634 * cs->old_mems_allowed.
2635 */
2636 cs->old_mems_allowed = newmems;
2637
2638 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2639 cpuset_being_rebound = NULL;
2640 }
2641
2642 /*
2643 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2644 * @cs: the cpuset to consider
2645 * @new_mems: a temp variable for calculating new effective_mems
2646 *
2647 * When configured nodemask is changed, the effective nodemasks of this cpuset
2648 * and all its descendants need to be updated.
2649 *
2650 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2651 *
2652 * Called with cpuset_mutex held
2653 */
2654 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2655 {
2656 struct cpuset *cp;
2657 struct cgroup_subsys_state *pos_css;
2658
2659 rcu_read_lock();
2660 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2661 struct cpuset *parent = parent_cs(cp);
2662
2663 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2664
2665 /*
2666 * If it becomes empty, inherit the effective mask of the
2667 * parent, which is guaranteed to have some MEMs.
2668 */
2669 if (is_in_v2_mode() && nodes_empty(*new_mems))
2670 *new_mems = parent->effective_mems;
2671
2672 /* Skip the whole subtree if the nodemask remains the same. */
2673 if (nodes_equal(*new_mems, cp->effective_mems)) {
2674 pos_css = css_rightmost_descendant(pos_css);
2675 continue;
2676 }
2677
2678 if (!css_tryget_online(&cp->css))
2679 continue;
2680 rcu_read_unlock();
2681
2682 spin_lock_irq(&callback_lock);
2683 cp->effective_mems = *new_mems;
2684 spin_unlock_irq(&callback_lock);
2685
2686 WARN_ON(!is_in_v2_mode() &&
2687 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2688
2689 cpuset_update_tasks_nodemask(cp);
2690
2691 rcu_read_lock();
2692 css_put(&cp->css);
2693 }
2694 rcu_read_unlock();
2695 }
2696
2697 /*
2698 * Handle user request to change the 'mems' memory placement
2699 * of a cpuset. Needs to validate the request, update the
2700 * cpusets mems_allowed, and for each task in the cpuset,
2701 * update mems_allowed and rebind task's mempolicy and any vma
2702 * mempolicies and if the cpuset is marked 'memory_migrate',
2703 * migrate the tasks pages to the new memory.
2704 *
2705 * Call with cpuset_mutex held. May take callback_lock during call.
2706 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2707 * lock each such tasks mm->mmap_lock, scan its vma's and rebind
2708 * their mempolicies to the cpusets new mems_allowed.
2709 */
2710 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2711 const char *buf)
2712 {
2713 int retval;
2714
2715 /*
2716 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2717 * it's read-only
2718 */
2719 if (cs == &top_cpuset) {
2720 retval = -EACCES;
2721 goto done;
2722 }
2723
2724 /*
2725 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2726 * Since nodelist_parse() fails on an empty mask, we special case
2727 * that parsing. The validate_change() call ensures that cpusets
2728 * with tasks have memory.
2729 */
2730 if (!*buf) {
2731 nodes_clear(trialcs->mems_allowed);
2732 } else {
2733 retval = nodelist_parse(buf, trialcs->mems_allowed);
2734 if (retval < 0)
2735 goto done;
2736
2737 if (!nodes_subset(trialcs->mems_allowed,
2738 top_cpuset.mems_allowed)) {
2739 retval = -EINVAL;
2740 goto done;
2741 }
2742 }
2743
2744 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2745 retval = 0; /* Too easy - nothing to do */
2746 goto done;
2747 }
2748 retval = validate_change(cs, trialcs);
2749 if (retval < 0)
2750 goto done;
2751
2752 check_insane_mems_config(&trialcs->mems_allowed);
2753
2754 spin_lock_irq(&callback_lock);
2755 cs->mems_allowed = trialcs->mems_allowed;
2756 spin_unlock_irq(&callback_lock);
2757
2758 /* use trialcs->mems_allowed as a temp variable */
2759 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2760 done:
2761 return retval;
2762 }
2763
2764 bool current_cpuset_is_being_rebound(void)
2765 {
2766 bool ret;
2767
2768 rcu_read_lock();
2769 ret = task_cs(current) == cpuset_being_rebound;
2770 rcu_read_unlock();
2771
2772 return ret;
2773 }
2774
2775 /*
2776 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2777 * bit: the bit to update (see cpuset_flagbits_t)
2778 * cs: the cpuset to update
2779 * turning_on: whether the flag is being set or cleared
2780 *
2781 * Call with cpuset_mutex held.
2782 */
2783
2784 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2785 int turning_on)
2786 {
2787 struct cpuset *trialcs;
2788 int balance_flag_changed;
2789 int spread_flag_changed;
2790 int err;
2791
2792 trialcs = alloc_trial_cpuset(cs);
2793 if (!trialcs)
2794 return -ENOMEM;
2795
2796 if (turning_on)
2797 set_bit(bit, &trialcs->flags);
2798 else
2799 clear_bit(bit, &trialcs->flags);
2800
2801 err = validate_change(cs, trialcs);
2802 if (err < 0)
2803 goto out;
2804
2805 balance_flag_changed = (is_sched_load_balance(cs) !=
2806 is_sched_load_balance(trialcs));
2807
2808 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2809 || (is_spread_page(cs) != is_spread_page(trialcs)));
2810
2811 spin_lock_irq(&callback_lock);
2812 cs->flags = trialcs->flags;
2813 spin_unlock_irq(&callback_lock);
2814
2815 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2816 if (cpuset_v2())
2817 cpuset_force_rebuild();
2818 else
2819 rebuild_sched_domains_locked();
2820 }
2821
2822 if (spread_flag_changed)
2823 cpuset1_update_tasks_flags(cs);
2824 out:
2825 free_cpuset(trialcs);
2826 return err;
2827 }
2828
2829 /**
2830 * update_prstate - update partition_root_state
2831 * @cs: the cpuset to update
2832 * @new_prs: new partition root state
2833 * Return: 0 if successful, != 0 if error
2834 *
2835 * Call with cpuset_mutex held.
2836 */
2837 static int update_prstate(struct cpuset *cs, int new_prs)
2838 {
2839 int err = PERR_NONE, old_prs = cs->partition_root_state;
2840 struct cpuset *parent = parent_cs(cs);
2841 struct tmpmasks tmpmask;
2842 bool isolcpus_updated = false;
2843
2844 if (old_prs == new_prs)
2845 return 0;
2846
2847 /*
2848 * Treat a previously invalid partition root as if it is a "member".
2849 */
2850 if (new_prs && is_prs_invalid(old_prs))
2851 old_prs = PRS_MEMBER;
2852
2853 if (alloc_cpumasks(NULL, &tmpmask))
2854 return -ENOMEM;
2855
2856 err = update_partition_exclusive_flag(cs, new_prs);
2857 if (err)
2858 goto out;
2859
2860 if (!old_prs) {
2861 /*
2862 * cpus_allowed and exclusive_cpus cannot be both empty.
2863 */
2864 if (xcpus_empty(cs)) {
2865 err = PERR_CPUSEMPTY;
2866 goto out;
2867 }
2868
2869 /*
2870 * We don't support the creation of a new local partition with
2871 * a remote partition underneath it. This unsupported
2872 * setting can happen only if parent is the top_cpuset because
2873 * a remote partition cannot be created underneath an existing
2874 * local or remote partition.
2875 */
2876 if ((parent == &top_cpuset) &&
2877 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2878 err = PERR_REMOTE;
2879 goto out;
2880 }
2881
2882 /*
2883 * If the parent is a valid partition root, enable a local partition.
2884 * Otherwise, enable a remote partition.
2885 */
2886 if (is_partition_valid(parent)) {
2887 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2888 ? partcmd_enable : partcmd_enablei;
2889
2890 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2891 } else {
2892 err = remote_partition_enable(cs, new_prs, &tmpmask);
2893 }
2894 } else if (old_prs && new_prs) {
2895 /*
2896 * A change in load balance state only, no change in cpumasks.
2897 * Need to update isolated_cpus.
2898 */
2899 isolcpus_updated = true;
2900 } else {
2901 /*
2902 * Switching back to member is always allowed even if it
2903 * disables child partitions.
2904 */
2905 if (is_remote_partition(cs))
2906 remote_partition_disable(cs, &tmpmask);
2907 else
2908 update_parent_effective_cpumask(cs, partcmd_disable,
2909 NULL, &tmpmask);
2910
2911 /*
2912 * Invalidation of child partitions will be done in
2913 * update_cpumasks_hier().
2914 */
2915 }
2916 out:
2917 /*
2918 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2919 * happens.
2920 */
2921 if (err) {
2922 new_prs = -new_prs;
2923 update_partition_exclusive_flag(cs, new_prs);
2924 }
2925
2926 spin_lock_irq(&callback_lock);
2927 cs->partition_root_state = new_prs;
2928 WRITE_ONCE(cs->prs_err, err);
2929 if (!is_partition_valid(cs))
2930 reset_partition_data(cs);
2931 else if (isolcpus_updated)
2932 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2933 spin_unlock_irq(&callback_lock);
2934 update_unbound_workqueue_cpumask(isolcpus_updated);
2935
2936 /* Force update if switching back to member & update effective_xcpus */
2937 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2938
2939 /* A newly created partition must have effective_xcpus set */
2940 WARN_ON_ONCE(!old_prs && (new_prs > 0)
2941 && cpumask_empty(cs->effective_xcpus));
2942
2943 /* Update sched domains and load balance flag */
2944 update_partition_sd_lb(cs, old_prs);
2945
2946 notify_partition_change(cs, old_prs);
2947 if (force_sd_rebuild)
2948 rebuild_sched_domains_locked();
2949 free_cpumasks(NULL, &tmpmask);
2950 return 0;
2951 }
2952
2953 static struct cpuset *cpuset_attach_old_cs;
2954
2955 /*
2956 * Check to see if a cpuset can accept a new task
2957 * For v1, cpus_allowed and mems_allowed can't be empty.
2958 * For v2, effective_cpus can't be empty.
2959 * Note that in v1, effective_cpus = cpus_allowed.
2960 */
2961 static int cpuset_can_attach_check(struct cpuset *cs)
2962 {
2963 if (cpumask_empty(cs->effective_cpus) ||
2964 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2965 return -ENOSPC;
2966 return 0;
2967 }
2968
2969 static void reset_migrate_dl_data(struct cpuset *cs)
2970 {
2971 cs->nr_migrate_dl_tasks = 0;
2972 cs->sum_migrate_dl_bw = 0;
2973 }
2974
2975 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2976 static int cpuset_can_attach(struct cgroup_taskset *tset)
2977 {
2978 struct cgroup_subsys_state *css;
2979 struct cpuset *cs, *oldcs;
2980 struct task_struct *task;
2981 bool cpus_updated, mems_updated;
2982 int ret;
2983
2984 /* used later by cpuset_attach() */
2985 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2986 oldcs = cpuset_attach_old_cs;
2987 cs = css_cs(css);
2988
2989 mutex_lock(&cpuset_mutex);
2990
2991 /* Check to see if task is allowed in the cpuset */
2992 ret = cpuset_can_attach_check(cs);
2993 if (ret)
2994 goto out_unlock;
2995
2996 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2997 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2998
2999 cgroup_taskset_for_each(task, css, tset) {
3000 ret = task_can_attach(task);
3001 if (ret)
3002 goto out_unlock;
3003
3004 /*
3005 * Skip the rights-over-task check in v2 when nothing changes;
3006 * migration permission derives from hierarchy ownership in
3007 * cgroup_procs_write_permission().
3008 */
3009 if (!cpuset_v2() || (cpus_updated || mems_updated)) {
3010 ret = security_task_setscheduler(task);
3011 if (ret)
3012 goto out_unlock;
3013 }
3014
3015 if (dl_task(task)) {
3016 cs->nr_migrate_dl_tasks++;
3017 cs->sum_migrate_dl_bw += task->dl.dl_bw;
3018 }
3019 }
3020
3021 if (!cs->nr_migrate_dl_tasks)
3022 goto out_success;
3023
3024 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3025 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3026
3027 if (unlikely(cpu >= nr_cpu_ids)) {
3028 reset_migrate_dl_data(cs);
3029 ret = -EINVAL;
3030 goto out_unlock;
3031 }
3032
3033 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3034 if (ret) {
3035 reset_migrate_dl_data(cs);
3036 goto out_unlock;
3037 }
3038 }
3039
3040 out_success:
3041 /*
3042 * Mark attach is in progress. This makes validate_change() fail
3043 * changes which zero cpus/mems_allowed.
3044 */
3045 cs->attach_in_progress++;
3046 out_unlock:
3047 mutex_unlock(&cpuset_mutex);
3048 return ret;
3049 }
3050
3051 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3052 {
3053 struct cgroup_subsys_state *css;
3054 struct cpuset *cs;
3055
3056 cgroup_taskset_first(tset, &css);
3057 cs = css_cs(css);
3058
3059 mutex_lock(&cpuset_mutex);
3060 dec_attach_in_progress_locked(cs);
3061
3062 if (cs->nr_migrate_dl_tasks) {
3063 int cpu = cpumask_any(cs->effective_cpus);
3064
3065 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3066 reset_migrate_dl_data(cs);
3067 }
3068
3069 mutex_unlock(&cpuset_mutex);
3070 }
3071
3072 /*
3073 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3074 * but we can't allocate it dynamically there. Define it global and
3075 * allocate from cpuset_init().
3076 */
3077 static cpumask_var_t cpus_attach;
3078 static nodemask_t cpuset_attach_nodemask_to;
3079
3080 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3081 {
3082 lockdep_assert_held(&cpuset_mutex);
3083
3084 if (cs != &top_cpuset)
3085 guarantee_online_cpus(task, cpus_attach);
3086 else
3087 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3088 subpartitions_cpus);
3089 /*
3090 * can_attach beforehand should guarantee that this doesn't
3091 * fail. TODO: have a better way to handle failure here
3092 */
3093 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3094
3095 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3096 cpuset1_update_task_spread_flags(cs, task);
3097 }
3098
3099 static void cpuset_attach(struct cgroup_taskset *tset)
3100 {
3101 struct task_struct *task;
3102 struct task_struct *leader;
3103 struct cgroup_subsys_state *css;
3104 struct cpuset *cs;
3105 struct cpuset *oldcs = cpuset_attach_old_cs;
3106 bool cpus_updated, mems_updated;
3107
3108 cgroup_taskset_first(tset, &css);
3109 cs = css_cs(css);
3110
3111 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3112 mutex_lock(&cpuset_mutex);
3113 cpus_updated = !cpumask_equal(cs->effective_cpus,
3114 oldcs->effective_cpus);
3115 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3116
3117 /*
3118 * In the default hierarchy, enabling cpuset in the child cgroups
3119 * will trigger a number of cpuset_attach() calls with no change
3120 * in effective cpus and mems. In that case, we can optimize out
3121 * by skipping the task iteration and update.
3122 */
3123 if (cpuset_v2() && !cpus_updated && !mems_updated) {
3124 cpuset_attach_nodemask_to = cs->effective_mems;
3125 goto out;
3126 }
3127
3128 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3129
3130 cgroup_taskset_for_each(task, css, tset)
3131 cpuset_attach_task(cs, task);
3132
3133 /*
3134 * Change mm for all threadgroup leaders. This is expensive and may
3135 * sleep and should be moved outside migration path proper. Skip it
3136 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3137 * not set.
3138 */
3139 cpuset_attach_nodemask_to = cs->effective_mems;
3140 if (!is_memory_migrate(cs) && !mems_updated)
3141 goto out;
3142
3143 cgroup_taskset_for_each_leader(leader, css, tset) {
3144 struct mm_struct *mm = get_task_mm(leader);
3145
3146 if (mm) {
3147 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3148
3149 /*
3150 * old_mems_allowed is the same as mems_allowed
3151 * here, except if this task is being moved
3152 * automatically due to hotplug. In that case
3153 * @mems_allowed has been updated and is empty, so
3154 * @old_mems_allowed is the right nodemask that we
3155 * migrate mm from.
3156 */
3157 if (is_memory_migrate(cs))
3158 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3159 &cpuset_attach_nodemask_to);
3160 else
3161 mmput(mm);
3162 }
3163 }
3164
3165 out:
3166 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3167
3168 if (cs->nr_migrate_dl_tasks) {
3169 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3170 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3171 reset_migrate_dl_data(cs);
3172 }
3173
3174 dec_attach_in_progress_locked(cs);
3175
3176 mutex_unlock(&cpuset_mutex);
3177 }
3178
3179 /*
3180 * Common handling for a write to a "cpus" or "mems" file.
3181 */
3182 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3183 char *buf, size_t nbytes, loff_t off)
3184 {
3185 struct cpuset *cs = css_cs(of_css(of));
3186 struct cpuset *trialcs;
3187 int retval = -ENODEV;
3188
3189 buf = strstrip(buf);
3190 cpus_read_lock();
3191 mutex_lock(&cpuset_mutex);
3192 if (!is_cpuset_online(cs))
3193 goto out_unlock;
3194
3195 trialcs = alloc_trial_cpuset(cs);
3196 if (!trialcs) {
3197 retval = -ENOMEM;
3198 goto out_unlock;
3199 }
3200
3201 switch (of_cft(of)->private) {
3202 case FILE_CPULIST:
3203 retval = update_cpumask(cs, trialcs, buf);
3204 break;
3205 case FILE_EXCLUSIVE_CPULIST:
3206 retval = update_exclusive_cpumask(cs, trialcs, buf);
3207 break;
3208 case FILE_MEMLIST:
3209 retval = update_nodemask(cs, trialcs, buf);
3210 break;
3211 default:
3212 retval = -EINVAL;
3213 break;
3214 }
3215
3216 free_cpuset(trialcs);
3217 if (force_sd_rebuild)
3218 rebuild_sched_domains_locked();
3219 out_unlock:
3220 mutex_unlock(&cpuset_mutex);
3221 cpus_read_unlock();
3222 flush_workqueue(cpuset_migrate_mm_wq);
3223 return retval ?: nbytes;
3224 }
3225
3226 /*
3227 * These ascii lists should be read in a single call, by using a user
3228 * buffer large enough to hold the entire map. If read in smaller
3229 * chunks, there is no guarantee of atomicity. Since the display format
3230 * used, list of ranges of sequential numbers, is variable length,
3231 * and since these maps can change value dynamically, one could read
3232 * gibberish by doing partial reads while a list was changing.
3233 */
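/*
 * Each mask below is printed with the "%*pbl" format, i.e. as a list of
 * ranges; e.g. a (hypothetical) cpumask with CPUs 0-3 and 8 set is shown
 * as "0-3,8".
 */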
3234 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3235 {
3236 struct cpuset *cs = css_cs(seq_css(sf));
3237 cpuset_filetype_t type = seq_cft(sf)->private;
3238 int ret = 0;
3239
3240 spin_lock_irq(&callback_lock);
3241
3242 switch (type) {
3243 case FILE_CPULIST:
3244 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3245 break;
3246 case FILE_MEMLIST:
3247 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3248 break;
3249 case FILE_EFFECTIVE_CPULIST:
3250 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3251 break;
3252 case FILE_EFFECTIVE_MEMLIST:
3253 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3254 break;
3255 case FILE_EXCLUSIVE_CPULIST:
3256 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3257 break;
3258 case FILE_EFFECTIVE_XCPULIST:
3259 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3260 break;
3261 case FILE_SUBPARTS_CPULIST:
3262 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3263 break;
3264 case FILE_ISOLATED_CPULIST:
3265 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3266 break;
3267 default:
3268 ret = -EINVAL;
3269 }
3270
3271 spin_unlock_irq(&callback_lock);
3272 return ret;
3273 }
3274
3275 static int cpuset_partition_show(struct seq_file *seq, void *v)
3276 {
3277 struct cpuset *cs = css_cs(seq_css(seq));
3278 const char *err, *type = NULL;
3279
3280 switch (cs->partition_root_state) {
3281 case PRS_ROOT:
3282 seq_puts(seq, "root\n");
3283 break;
3284 case PRS_ISOLATED:
3285 seq_puts(seq, "isolated\n");
3286 break;
3287 case PRS_MEMBER:
3288 seq_puts(seq, "member\n");
3289 break;
3290 case PRS_INVALID_ROOT:
3291 type = "root";
3292 fallthrough;
3293 case PRS_INVALID_ISOLATED:
3294 if (!type)
3295 type = "isolated";
3296 err = perr_strings[READ_ONCE(cs->prs_err)];
3297 if (err)
3298 seq_printf(seq, "%s invalid (%s)\n", type, err);
3299 else
3300 seq_printf(seq, "%s invalid\n", type);
3301 break;
3302 }
3303 return 0;
3304 }
3305
3306 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3307 size_t nbytes, loff_t off)
3308 {
3309 struct cpuset *cs = css_cs(of_css(of));
3310 int val;
3311 int retval = -ENODEV;
3312
3313 buf = strstrip(buf);
3314
3315 if (!strcmp(buf, "root"))
3316 val = PRS_ROOT;
3317 else if (!strcmp(buf, "member"))
3318 val = PRS_MEMBER;
3319 else if (!strcmp(buf, "isolated"))
3320 val = PRS_ISOLATED;
3321 else
3322 return -EINVAL;
3323
3324 css_get(&cs->css);
3325 cpus_read_lock();
3326 mutex_lock(&cpuset_mutex);
3327 if (is_cpuset_online(cs))
3328 retval = update_prstate(cs, val);
3329 mutex_unlock(&cpuset_mutex);
3330 cpus_read_unlock();
3331 css_put(&cs->css);
3332 return retval ?: nbytes;
3333 }
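
/*
 * Illustrative usage from userspace (assuming a hypothetical child cgroup
 * "A" under a cgroup v2 mount at /sys/fs/cgroup):
 *
 *	# echo "3-5" > /sys/fs/cgroup/A/cpuset.cpus
 *	# echo isolated > /sys/fs/cgroup/A/cpuset.cpus.partition
 *	# cat /sys/fs/cgroup/A/cpuset.cpus.partition
 *	isolated
 */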
3334
3335 /*
3336 * This is currently a minimal set for the default hierarchy. It can be
3337 * expanded later on by migrating more features and control files from v1.
3338 */
3339 static struct cftype dfl_files[] = {
3340 {
3341 .name = "cpus",
3342 .seq_show = cpuset_common_seq_show,
3343 .write = cpuset_write_resmask,
3344 .max_write_len = (100U + 6 * NR_CPUS),
3345 .private = FILE_CPULIST,
3346 .flags = CFTYPE_NOT_ON_ROOT,
3347 },
3348
3349 {
3350 .name = "mems",
3351 .seq_show = cpuset_common_seq_show,
3352 .write = cpuset_write_resmask,
3353 .max_write_len = (100U + 6 * MAX_NUMNODES),
3354 .private = FILE_MEMLIST,
3355 .flags = CFTYPE_NOT_ON_ROOT,
3356 },
3357
3358 {
3359 .name = "cpus.effective",
3360 .seq_show = cpuset_common_seq_show,
3361 .private = FILE_EFFECTIVE_CPULIST,
3362 },
3363
3364 {
3365 .name = "mems.effective",
3366 .seq_show = cpuset_common_seq_show,
3367 .private = FILE_EFFECTIVE_MEMLIST,
3368 },
3369
3370 {
3371 .name = "cpus.partition",
3372 .seq_show = cpuset_partition_show,
3373 .write = cpuset_partition_write,
3374 .private = FILE_PARTITION_ROOT,
3375 .flags = CFTYPE_NOT_ON_ROOT,
3376 .file_offset = offsetof(struct cpuset, partition_file),
3377 },
3378
3379 {
3380 .name = "cpus.exclusive",
3381 .seq_show = cpuset_common_seq_show,
3382 .write = cpuset_write_resmask,
3383 .max_write_len = (100U + 6 * NR_CPUS),
3384 .private = FILE_EXCLUSIVE_CPULIST,
3385 .flags = CFTYPE_NOT_ON_ROOT,
3386 },
3387
3388 {
3389 .name = "cpus.exclusive.effective",
3390 .seq_show = cpuset_common_seq_show,
3391 .private = FILE_EFFECTIVE_XCPULIST,
3392 .flags = CFTYPE_NOT_ON_ROOT,
3393 },
3394
3395 {
3396 .name = "cpus.subpartitions",
3397 .seq_show = cpuset_common_seq_show,
3398 .private = FILE_SUBPARTS_CPULIST,
3399 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3400 },
3401
3402 {
3403 .name = "cpus.isolated",
3404 .seq_show = cpuset_common_seq_show,
3405 .private = FILE_ISOLATED_CPULIST,
3406 .flags = CFTYPE_ONLY_ON_ROOT,
3407 },
3408
3409 { } /* terminate */
3410 };
3411
3412
3413 /**
3414 * cpuset_css_alloc - Allocate a cpuset css
3415 * @parent_css: Parent css of the control group that the new cpuset will be
3416 * part of
3417 * Return: cpuset css on success, -ENOMEM on failure.
3418 *
3419 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3420 * top cpuset css otherwise.
3421 */
3422 static struct cgroup_subsys_state *
3423 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3424 {
3425 struct cpuset *cs;
3426
3427 if (!parent_css)
3428 return &top_cpuset.css;
3429
3430 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3431 if (!cs)
3432 return ERR_PTR(-ENOMEM);
3433
3434 if (alloc_cpumasks(cs, NULL)) {
3435 kfree(cs);
3436 return ERR_PTR(-ENOMEM);
3437 }
3438
3439 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3440 fmeter_init(&cs->fmeter);
3441 cs->relax_domain_level = -1;
3442 INIT_LIST_HEAD(&cs->remote_sibling);
3443
3444 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3445 if (cpuset_v2())
3446 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3447
3448 return &cs->css;
3449 }
3450
3451 static int cpuset_css_online(struct cgroup_subsys_state *css)
3452 {
3453 struct cpuset *cs = css_cs(css);
3454 struct cpuset *parent = parent_cs(cs);
3455 struct cpuset *tmp_cs;
3456 struct cgroup_subsys_state *pos_css;
3457
3458 if (!parent)
3459 return 0;
3460
3461 cpus_read_lock();
3462 mutex_lock(&cpuset_mutex);
3463
3464 set_bit(CS_ONLINE, &cs->flags);
3465 if (is_spread_page(parent))
3466 set_bit(CS_SPREAD_PAGE, &cs->flags);
3467 if (is_spread_slab(parent))
3468 set_bit(CS_SPREAD_SLAB, &cs->flags);
3469 /*
3470 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3471 */
3472 if (cpuset_v2() && !is_sched_load_balance(parent))
3473 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3474
3475 cpuset_inc();
3476
3477 spin_lock_irq(&callback_lock);
3478 if (is_in_v2_mode()) {
3479 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3480 cs->effective_mems = parent->effective_mems;
3481 }
3482 spin_unlock_irq(&callback_lock);
3483
3484 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3485 goto out_unlock;
3486
3487 /*
3488 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3489 * set. This flag handling is implemented in cgroup core for
3490 * historical reasons - the flag may be specified during mount.
3491 *
3492 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3493 * refuse to clone the configuration - thereby refusing to admit
3494 * the task, and as a result refusing the sys_unshare() or
3495 * clone() which initiated it. If this becomes a problem for some
3496 * users who wish to allow that scenario, then this could be
3497 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3498 * (and likewise for mems) to the new cgroup.
3499 */
3500 rcu_read_lock();
3501 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3502 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3503 rcu_read_unlock();
3504 goto out_unlock;
3505 }
3506 }
3507 rcu_read_unlock();
3508
3509 spin_lock_irq(&callback_lock);
3510 cs->mems_allowed = parent->mems_allowed;
3511 cs->effective_mems = parent->mems_allowed;
3512 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3513 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3514 spin_unlock_irq(&callback_lock);
3515 out_unlock:
3516 mutex_unlock(&cpuset_mutex);
3517 cpus_read_unlock();
3518 return 0;
3519 }
3520
3521 /*
3522 * If the cpuset being removed has its flag 'sched_load_balance'
3523 * enabled, then simulate turning sched_load_balance off, which
3524 * will call rebuild_sched_domains_locked(). That is not needed
3525 * in the default hierarchy where only changes in partition
3526 * will cause repartitioning.
3527 *
3528 * If the cpuset has the 'sched.partition' flag enabled, simulate
3529  * turning 'sched.partition' off.
3530 */
3531
3532 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3533 {
3534 struct cpuset *cs = css_cs(css);
3535
3536 cpus_read_lock();
3537 mutex_lock(&cpuset_mutex);
3538
3539 if (!cpuset_v2() && is_sched_load_balance(cs))
3540 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3541
3542 cpuset_dec();
3543 clear_bit(CS_ONLINE, &cs->flags);
3544
3545 mutex_unlock(&cpuset_mutex);
3546 cpus_read_unlock();
3547 }
3548
3549 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3550 {
3551 struct cpuset *cs = css_cs(css);
3552
3553 cpus_read_lock();
3554 mutex_lock(&cpuset_mutex);
3555
3556 /* Reset valid partition back to member */
3557 if (is_partition_valid(cs))
3558 update_prstate(cs, PRS_MEMBER);
3559
3560 mutex_unlock(&cpuset_mutex);
3561 cpus_read_unlock();
3563 }
3564
3565 static void cpuset_css_free(struct cgroup_subsys_state *css)
3566 {
3567 struct cpuset *cs = css_cs(css);
3568
3569 free_cpuset(cs);
3570 }
3571
3572 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3573 {
3574 mutex_lock(&cpuset_mutex);
3575 spin_lock_irq(&callback_lock);
3576
3577 if (is_in_v2_mode()) {
3578 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3579 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3580 top_cpuset.mems_allowed = node_possible_map;
3581 } else {
3582 cpumask_copy(top_cpuset.cpus_allowed,
3583 top_cpuset.effective_cpus);
3584 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3585 }
3586
3587 spin_unlock_irq(&callback_lock);
3588 mutex_unlock(&cpuset_mutex);
3589 }
3590
3591 /*
3592 * In case the child is cloned into a cpuset different from its parent,
3593 * additional checks are done to see if the move is allowed.
3594 */
3595 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3596 {
3597 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3598 bool same_cs;
3599 int ret;
3600
3601 rcu_read_lock();
3602 same_cs = (cs == task_cs(current));
3603 rcu_read_unlock();
3604
3605 if (same_cs)
3606 return 0;
3607
3608 lockdep_assert_held(&cgroup_mutex);
3609 mutex_lock(&cpuset_mutex);
3610
3611 /* Check to see if task is allowed in the cpuset */
3612 ret = cpuset_can_attach_check(cs);
3613 if (ret)
3614 goto out_unlock;
3615
3616 ret = task_can_attach(task);
3617 if (ret)
3618 goto out_unlock;
3619
3620 ret = security_task_setscheduler(task);
3621 if (ret)
3622 goto out_unlock;
3623
3624 /*
3625 * Mark attach is in progress. This makes validate_change() fail
3626 * changes which zero cpus/mems_allowed.
3627 */
3628 cs->attach_in_progress++;
3629 out_unlock:
3630 mutex_unlock(&cpuset_mutex);
3631 return ret;
3632 }
3633
3634 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3635 {
3636 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3637 bool same_cs;
3638
3639 rcu_read_lock();
3640 same_cs = (cs == task_cs(current));
3641 rcu_read_unlock();
3642
3643 if (same_cs)
3644 return;
3645
3646 dec_attach_in_progress(cs);
3647 }
3648
3649 /*
3650  * Make sure the new task conforms to the current state of its parent,
3651  * which could have been changed by cpuset just after the task inherited
3652  * that state and before it was added to the cgroup's task list.
3653 */
3654 static void cpuset_fork(struct task_struct *task)
3655 {
3656 struct cpuset *cs;
3657 bool same_cs;
3658
3659 rcu_read_lock();
3660 cs = task_cs(task);
3661 same_cs = (cs == task_cs(current));
3662 rcu_read_unlock();
3663
3664 if (same_cs) {
3665 if (cs == &top_cpuset)
3666 return;
3667
3668 set_cpus_allowed_ptr(task, current->cpus_ptr);
3669 task->mems_allowed = current->mems_allowed;
3670 return;
3671 }
3672
3673 /* CLONE_INTO_CGROUP */
3674 mutex_lock(&cpuset_mutex);
3675 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3676 cpuset_attach_task(cs, task);
3677
3678 dec_attach_in_progress_locked(cs);
3679 mutex_unlock(&cpuset_mutex);
3680 }
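/*
 * Userspace sketch (illustrative, error handling omitted) of the
 * CLONE_INTO_CGROUP path that exercises cpuset_can_fork()/cpuset_fork()
 * above: the child is created directly inside the cgroup referred to by
 * an open directory fd, so it may land in a cpuset different from its
 * parent's.
 *
 *	int cgfd = open("/sys/fs/cgroup/mygrp", O_RDONLY | O_DIRECTORY);
 *	struct clone_args args = {
 *		.flags		= CLONE_INTO_CGROUP,
 *		.exit_signal	= SIGCHLD,
 *		.cgroup		= cgfd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * "mygrp" is a hypothetical cgroup directory name.
 */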
3681
3682 struct cgroup_subsys cpuset_cgrp_subsys = {
3683 .css_alloc = cpuset_css_alloc,
3684 .css_online = cpuset_css_online,
3685 .css_offline = cpuset_css_offline,
3686 .css_killed = cpuset_css_killed,
3687 .css_free = cpuset_css_free,
3688 .can_attach = cpuset_can_attach,
3689 .cancel_attach = cpuset_cancel_attach,
3690 .attach = cpuset_attach,
3691 .post_attach = cpuset_post_attach,
3692 .bind = cpuset_bind,
3693 .can_fork = cpuset_can_fork,
3694 .cancel_fork = cpuset_cancel_fork,
3695 .fork = cpuset_fork,
3696 #ifdef CONFIG_CPUSETS_V1
3697 .legacy_cftypes = cpuset1_files,
3698 #endif
3699 .dfl_cftypes = dfl_files,
3700 .early_init = true,
3701 .threaded = true,
3702 };
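/*
 * Rough lifecycle of the callbacks wired up above (a summary, not a
 * normative statement of cgroup core behaviour): css_alloc() builds the
 * cpuset, css_online() makes it visible and inherits state from the
 * parent, can_attach()/attach()/post_attach() and can_fork()/fork()
 * migrate tasks in, css_killed() drops a valid partition back to member
 * when the cgroup is being removed or killed, and css_offline() followed
 * by css_free() tear the cpuset down.
 */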
3703
3704 /**
3705 * cpuset_init - initialize cpusets at system boot
3706 *
3707 * Description: Initialize top_cpuset
3708 **/
3709
3710 int __init cpuset_init(void)
3711 {
3712 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3713 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3714 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3715 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3716 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3717 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3718
3719 cpumask_setall(top_cpuset.cpus_allowed);
3720 nodes_setall(top_cpuset.mems_allowed);
3721 cpumask_setall(top_cpuset.effective_cpus);
3722 cpumask_setall(top_cpuset.effective_xcpus);
3723 cpumask_setall(top_cpuset.exclusive_cpus);
3724 nodes_setall(top_cpuset.effective_mems);
3725
3726 fmeter_init(&top_cpuset.fmeter);
3727 INIT_LIST_HEAD(&remote_children);
3728
3729 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3730
3731 have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
3732 if (have_boot_isolcpus) {
3733 BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
3734 cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
3735 cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
3736 }
3737
3738 return 0;
3739 }
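/*
 * Example (assuming the usual housekeeping setup): booting with
 * "isolcpus=2,3" removes CPUs 2-3 from the HK_TYPE_DOMAIN housekeeping
 * mask, so cpuset_init() above records the remaining CPUs in boot_hk_cpus
 * and seeds CPUs 2-3 into isolated_cpus; later partition updates must
 * respect that boot-time isolation.
 */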
3740
3741 static void
3742 hotplug_update_tasks(struct cpuset *cs,
3743 struct cpumask *new_cpus, nodemask_t *new_mems,
3744 bool cpus_updated, bool mems_updated)
3745 {
3746 /* A partition root is allowed to have empty effective cpus */
3747 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3748 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3749 if (nodes_empty(*new_mems))
3750 *new_mems = parent_cs(cs)->effective_mems;
3751
3752 spin_lock_irq(&callback_lock);
3753 cpumask_copy(cs->effective_cpus, new_cpus);
3754 cs->effective_mems = *new_mems;
3755 spin_unlock_irq(&callback_lock);
3756
3757 if (cpus_updated)
3758 cpuset_update_tasks_cpumask(cs, new_cpus);
3759 if (mems_updated)
3760 cpuset_update_tasks_nodemask(cs);
3761 }
3762
3763 void cpuset_force_rebuild(void)
3764 {
3765 force_sd_rebuild = true;
3766 }
3767
3768 /**
3769 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3770 * @cs: cpuset in interest
3771 * @tmp: the tmpmasks structure pointer
3772 *
3773 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3774 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3775 * all its tasks are moved to the nearest ancestor with both resources.
3776 */
3777 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3778 {
3779 static cpumask_t new_cpus;
3780 static nodemask_t new_mems;
3781 bool cpus_updated;
3782 bool mems_updated;
3783 bool remote;
3784 int partcmd = -1;
3785 struct cpuset *parent;
3786 retry:
3787 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3788
3789 mutex_lock(&cpuset_mutex);
3790
3791 /*
3792 * We have raced with task attaching. We wait until attaching
3793 * is finished, so we won't attach a task to an empty cpuset.
3794 */
3795 if (cs->attach_in_progress) {
3796 mutex_unlock(&cpuset_mutex);
3797 goto retry;
3798 }
3799
3800 parent = parent_cs(cs);
3801 compute_effective_cpumask(&new_cpus, cs, parent);
3802 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3803
3804 if (!tmp || !cs->partition_root_state)
3805 goto update_tasks;
3806
3807 /*
3808 * Compute effective_cpus for valid partition root, may invalidate
3809 * child partition roots if necessary.
3810 */
3811 remote = is_remote_partition(cs);
3812 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3813 compute_partition_effective_cpumask(cs, &new_cpus);
3814
3815 if (remote && cpumask_empty(&new_cpus) &&
3816 partition_is_populated(cs, NULL)) {
3817 cs->prs_err = PERR_HOTPLUG;
3818 remote_partition_disable(cs, tmp);
3819 compute_effective_cpumask(&new_cpus, cs, parent);
3820 remote = false;
3821 }
3822
3823 /*
3824 * Force the partition to become invalid if either one of
3825 * the following conditions hold:
3826 * 1) empty effective cpus but not valid empty partition.
3827 * 2) parent is invalid or doesn't grant any cpus to child
3828 * partitions.
3829 */
3830 if (is_local_partition(cs) && (!is_partition_valid(parent) ||
3831 tasks_nocpu_error(parent, cs, &new_cpus)))
3832 partcmd = partcmd_invalidate;
3833 /*
3834 * On the other hand, an invalid partition root may be transitioned
3835 * back to a regular one.
3836 */
3837 else if (is_partition_valid(parent) && is_partition_invalid(cs))
3838 partcmd = partcmd_update;
3839
3840 if (partcmd >= 0) {
3841 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3842 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3843 compute_partition_effective_cpumask(cs, &new_cpus);
3844 cpuset_force_rebuild();
3845 }
3846 }
3847
3848 update_tasks:
3849 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3850 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3851 if (!cpus_updated && !mems_updated)
3852 goto unlock; /* Hotplug doesn't affect this cpuset */
3853
3854 if (mems_updated)
3855 check_insane_mems_config(&new_mems);
3856
3857 if (is_in_v2_mode())
3858 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3859 cpus_updated, mems_updated);
3860 else
3861 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3862 cpus_updated, mems_updated);
3863
3864 unlock:
3865 mutex_unlock(&cpuset_mutex);
3866 }
3867
3868 /**
3869 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3870 *
3871 * This function is called after either CPU or memory configuration has
3872 * changed and updates cpuset accordingly. The top_cpuset is always
3873 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3874  * order to make cpusets transparent (of no effect) on systems that are
3875 * actively using CPU hotplug but making no active use of cpusets.
3876 *
3877 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3878 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3879 * all descendants.
3880 *
3881 * Note that CPU offlining during suspend is ignored. We don't modify
3882 * cpusets across suspend/resume cycles at all.
3883 *
3884 * CPU / memory hotplug is handled synchronously.
3885 */
3886 static void cpuset_handle_hotplug(void)
3887 {
3888 static cpumask_t new_cpus;
3889 static nodemask_t new_mems;
3890 bool cpus_updated, mems_updated;
3891 bool on_dfl = is_in_v2_mode();
3892 struct tmpmasks tmp, *ptmp = NULL;
3893
3894 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3895 ptmp = &tmp;
3896
3897 lockdep_assert_cpus_held();
3898 mutex_lock(&cpuset_mutex);
3899
3900 /* fetch the available cpus/mems and find out which changed how */
3901 cpumask_copy(&new_cpus, cpu_active_mask);
3902 new_mems = node_states[N_MEMORY];
3903
3904 /*
3905 * If subpartitions_cpus is populated, it is likely that the check
3906 * below will produce a false positive on cpus_updated when the cpu
3907 * list isn't changed. It is extra work, but it is better to be safe.
3908 */
3909 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3910 !cpumask_empty(subpartitions_cpus);
3911 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3912
3913 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3914 if (cpus_updated) {
3915 cpuset_force_rebuild();
3916 spin_lock_irq(&callback_lock);
3917 if (!on_dfl)
3918 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3919 /*
3920 * Make sure that CPUs allocated to child partitions
3921 * do not show up in effective_cpus. If no CPU is left,
3922 * we clear the subpartitions_cpus & let the child partitions
3923 * fight for the CPUs again.
3924 */
3925 if (!cpumask_empty(subpartitions_cpus)) {
3926 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3927 top_cpuset.nr_subparts = 0;
3928 cpumask_clear(subpartitions_cpus);
3929 } else {
3930 cpumask_andnot(&new_cpus, &new_cpus,
3931 subpartitions_cpus);
3932 }
3933 }
3934 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3935 spin_unlock_irq(&callback_lock);
3936 /* we don't mess with cpumasks of tasks in top_cpuset */
3937 }
3938
3939 /* synchronize mems_allowed to N_MEMORY */
3940 if (mems_updated) {
3941 spin_lock_irq(&callback_lock);
3942 if (!on_dfl)
3943 top_cpuset.mems_allowed = new_mems;
3944 top_cpuset.effective_mems = new_mems;
3945 spin_unlock_irq(&callback_lock);
3946 cpuset_update_tasks_nodemask(&top_cpuset);
3947 }
3948
3949 mutex_unlock(&cpuset_mutex);
3950
3951 /* if cpus or mems changed, we need to propagate to descendants */
3952 if (cpus_updated || mems_updated) {
3953 struct cpuset *cs;
3954 struct cgroup_subsys_state *pos_css;
3955
3956 rcu_read_lock();
3957 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3958 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3959 continue;
3960 rcu_read_unlock();
3961
3962 cpuset_hotplug_update_tasks(cs, ptmp);
3963
3964 rcu_read_lock();
3965 css_put(&cs->css);
3966 }
3967 rcu_read_unlock();
3968 }
3969
3970 /* rebuild sched domains if necessary */
3971 if (force_sd_rebuild)
3972 rebuild_sched_domains_cpuslocked();
3973
3974 free_cpumasks(NULL, ptmp);
3975 }
3976
3977 void cpuset_update_active_cpus(void)
3978 {
3979 /*
3980 	 * We're inside the cpu hotplug critical region which usually nests
3981 	 * inside cgroup synchronization. Hotplug processing is now handled
3982 	 * synchronously via cpuset_handle_hotplug() (see its comment above).
3983 */
3984 cpuset_handle_hotplug();
3985 }
3986
3987 /*
3988 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3989 * Call this routine anytime after node_states[N_MEMORY] changes.
3990 * See cpuset_update_active_cpus() for CPU hotplug handling.
3991 */
3992 static int cpuset_track_online_nodes(struct notifier_block *self,
3993 unsigned long action, void *arg)
3994 {
3995 cpuset_handle_hotplug();
3996 return NOTIFY_OK;
3997 }
3998
3999 /**
4000 * cpuset_init_smp - initialize cpus_allowed
4001 *
4002  * Description: Finish initializing top_cpuset after the cpu and node maps are initialized
4003 */
4004 void __init cpuset_init_smp(void)
4005 {
4006 /*
4007 	 * cpus_allowed/mems_allowed set to v2 values in the initial
4008 	 * cpuset_bind() call will be reset to v1 values in another
4009 	 * cpuset_bind() call when a v1 cpuset is mounted.
4010 */
4011 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
4012
4013 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
4014 top_cpuset.effective_mems = node_states[N_MEMORY];
4015
4016 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
4017
4018 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
4019 BUG_ON(!cpuset_migrate_mm_wq);
4020 }
4021
4022 /**
4023  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4024 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4025 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4026 *
4027 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4028 * attached to the specified @tsk. Guaranteed to return some non-empty
4029 * subset of cpu_online_mask, even if this means going outside the
4030  * task's cpuset, except when the task is in the top cpuset.
4031 **/
4032
4033 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4034 {
4035 unsigned long flags;
4036 struct cpuset *cs;
4037
4038 spin_lock_irqsave(&callback_lock, flags);
4039 rcu_read_lock();
4040
4041 cs = task_cs(tsk);
4042 if (cs != &top_cpuset)
4043 guarantee_online_cpus(tsk, pmask);
4044 /*
4045 	 * Tasks in the top cpuset won't get their cpumasks updated
4046 * when a hotplug online/offline event happens. So we include all
4047 * offline cpus in the allowed cpu list.
4048 */
4049 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4050 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4051
4052 /*
4053 * We first exclude cpus allocated to partitions. If there is no
4054 * allowable online cpu left, we fall back to all possible cpus.
4055 */
4056 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4057 if (!cpumask_intersects(pmask, cpu_online_mask))
4058 cpumask_copy(pmask, possible_mask);
4059 }
4060
4061 rcu_read_unlock();
4062 spin_unlock_irqrestore(&callback_lock, flags);
4063 }
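/*
 * Sketch of the typical caller pattern (modelled on sched_setaffinity();
 * simplified, not the exact kernel code): the cpuset mask is used to
 * bound a user-requested affinity mask before it is applied to the task.
 *
 *	cpumask_var_t cpus_allowed, new_mask;
 *
 *	cpuset_cpus_allowed(p, cpus_allowed);
 *	cpumask_and(new_mask, in_mask, cpus_allowed);
 *
 * Here "in_mask" stands for the mask supplied by userspace; new_mask is
 * then installed as the task's affinity.
 */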
4064
4065 /**
4066 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4067 * @tsk: pointer to task_struct with which the scheduler is struggling
4068 *
4069 * Description: In the case that the scheduler cannot find an allowed cpu in
4070 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4071 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4072 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4073 * This is the absolute last resort for the scheduler and it is only used if
4074 * _every_ other avenue has been traveled.
4075 *
4076 * Returns true if the affinity of @tsk was changed, false otherwise.
4077 **/
4078
4079 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4080 {
4081 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4082 const struct cpumask *cs_mask;
4083 bool changed = false;
4084
4085 rcu_read_lock();
4086 cs_mask = task_cs(tsk)->cpus_allowed;
4087 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4088 do_set_cpus_allowed(tsk, cs_mask);
4089 changed = true;
4090 }
4091 rcu_read_unlock();
4092
4093 /*
4094 * We own tsk->cpus_allowed, nobody can change it under us.
4095 *
4096 * But we used cs && cs->cpus_allowed lockless and thus can
4097 * race with cgroup_attach_task() or update_cpumask() and get
4098 * the wrong tsk->cpus_allowed. However, both cases imply the
4099 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4100 * which takes task_rq_lock().
4101 *
4102 * If we are called after it dropped the lock we must see all
4103 	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4104 * set any mask even if it is not right from task_cs() pov,
4105 * the pending set_cpus_allowed_ptr() will fix things.
4106 *
4107 	 * select_fallback_rq() will fix things up and set cpu_possible_mask
4108 * if required.
4109 */
4110 return changed;
4111 }
4112
4113 void __init cpuset_init_current_mems_allowed(void)
4114 {
4115 nodes_setall(current->mems_allowed);
4116 }
4117
4118 /**
4119  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4120 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4121 *
4122 * Description: Returns the nodemask_t mems_allowed of the cpuset
4123 * attached to the specified @tsk. Guaranteed to return some non-empty
4124 * subset of node_states[N_MEMORY], even if this means going outside the
4125  * task's cpuset.
4126 **/
4127
4128 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4129 {
4130 nodemask_t mask;
4131 unsigned long flags;
4132
4133 spin_lock_irqsave(&callback_lock, flags);
4134 rcu_read_lock();
4135 guarantee_online_mems(task_cs(tsk), &mask);
4136 rcu_read_unlock();
4137 spin_unlock_irqrestore(&callback_lock, flags);
4138
4139 return mask;
4140 }
4141
4142 /**
4143 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4144 * @nodemask: the nodemask to be checked
4145 *
4146 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4147 */
4148 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4149 {
4150 return nodes_intersects(*nodemask, current->mems_allowed);
4151 }
4152
4153 /*
4154 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4155  * mem_hardwall ancestor to the specified cpuset. Must be called with
4156  * callback_lock held. If no ancestor is mem_exclusive or mem_hardwall
4157 * (an unusual configuration), then returns the root cpuset.
4158 */
4159 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4160 {
4161 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4162 cs = parent_cs(cs);
4163 return cs;
4164 }
4165
4166 /*
4167 * cpuset_node_allowed - Can we allocate on a memory node?
4168 * @node: is this an allowed node?
4169 * @gfp_mask: memory allocation flags
4170 *
4171 * If we're in interrupt, yes, we can always allocate. If @node is set in
4172 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4173 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4174 * yes. If current has access to memory reserves as an oom victim, yes.
4175 * Otherwise, no.
4176 *
4177 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4178 * and do not allow allocations outside the current tasks cpuset
4179 * unless the task has been OOM killed.
4180 * GFP_KERNEL allocations are not so marked, so can escape to the
4181 * nearest enclosing hardwalled ancestor cpuset.
4182 *
4183 * Scanning up parent cpusets requires callback_lock. The
4184 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4185 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4186 * current tasks mems_allowed came up empty on the first pass over
4187 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4188 * cpuset are short of memory, might require taking the callback_lock.
4189 *
4190 * The first call here from mm/page_alloc:get_page_from_freelist()
4191 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4192 * so no allocation on a node outside the cpuset is allowed (unless
4193 * in interrupt, of course).
4194 *
4195 * The second pass through get_page_from_freelist() doesn't even call
4196 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4197 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4198 * in alloc_flags. That logic and the checks below have the combined
4199  * effect that:
4200 * in_interrupt - any node ok (current task context irrelevant)
4201 * GFP_ATOMIC - any node ok
4202 * tsk_is_oom_victim - any node ok
4203 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4204 * GFP_USER - only nodes in current tasks mems allowed ok.
4205 */
4206 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4207 {
4208 struct cpuset *cs; /* current cpuset ancestors */
4209 bool allowed; /* is allocation in zone z allowed? */
4210 unsigned long flags;
4211
4212 if (in_interrupt())
4213 return true;
4214 if (node_isset(node, current->mems_allowed))
4215 return true;
4216 /*
4217 * Allow tasks that have access to memory reserves because they have
4218 * been OOM killed to get memory anywhere.
4219 */
4220 if (unlikely(tsk_is_oom_victim(current)))
4221 return true;
4222 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4223 return false;
4224
4225 if (current->flags & PF_EXITING) /* Let dying task have memory */
4226 return true;
4227
4228 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4229 spin_lock_irqsave(&callback_lock, flags);
4230
4231 rcu_read_lock();
4232 cs = nearest_hardwall_ancestor(task_cs(current));
4233 allowed = node_isset(node, cs->mems_allowed);
4234 rcu_read_unlock();
4235
4236 spin_unlock_irqrestore(&callback_lock, flags);
4237 return allowed;
4238 }
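/*
 * Sketch of how an allocator-side check might consult this (illustrative
 * only; the real page allocator goes through a zone-based wrapper and is
 * guarded by the cpusets_enabled() static key):
 *
 *	if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
 *	    !cpuset_node_allowed(zone_to_nid(zone), gfp_mask))
 *		continue;
 *
 * i.e. the zone is skipped during the zonelist walk if its node is not
 * allowed for this allocation.
 */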
4239
4240 /**
4241 * cpuset_spread_node() - On which node to begin search for a page
4242 * @rotor: round robin rotor
4243 *
4244 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4245 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4246 * and if the memory allocation used cpuset_mem_spread_node()
4247 * to determine on which node to start looking, as it will for
4248 * certain page cache or slab cache pages such as used for file
4249 * system buffers and inode caches, then instead of starting on the
4250 * local node to look for a free page, rather spread the starting
4251 * node around the tasks mems_allowed nodes.
4252 *
4253 * We don't have to worry about the returned node being offline
4254 * because "it can't happen", and even if it did, it would be ok.
4255 *
4256 * The routines calling guarantee_online_mems() are careful to
4257 * only set nodes in task->mems_allowed that are online. So it
4258 * should not be possible for the following code to return an
4259 * offline node. But if it did, that would be ok, as this routine
4260 * is not returning the node where the allocation must be, only
4261 * the node where the search should start. The zonelist passed to
4262 * __alloc_pages() will include all nodes. If the slab allocator
4263 * is passed an offline node, it will fall back to the local node.
4264 * See kmem_cache_alloc_node().
4265 */
4266 static int cpuset_spread_node(int *rotor)
4267 {
4268 return *rotor = next_node_in(*rotor, current->mems_allowed);
4269 }
4270
4271 /**
4272 * cpuset_mem_spread_node() - On which node to begin search for a file page
4273 */
4274 int cpuset_mem_spread_node(void)
4275 {
4276 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4277 current->cpuset_mem_spread_rotor =
4278 node_random(&current->mems_allowed);
4279
4280 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4281 }
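/*
 * Sketch of how a page-cache style allocation might use the rotor
 * (illustrative only; cpuset_do_page_mem_spread() tests the PF_SPREAD_PAGE
 * task flag and alloc_pages_node() is the generic NUMA allocation entry
 * point):
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int nid = cpuset_mem_spread_node();
 *		page = alloc_pages_node(nid, gfp, 0);
 *	}
 */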
4282
4283 /**
4284 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4285 * @tsk1: pointer to task_struct of some task.
4286 * @tsk2: pointer to task_struct of some other task.
4287 *
4288 * Description: Return true if @tsk1's mems_allowed intersects the
4289 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4290 * one of the task's memory usage might impact the memory available
4291 * to the other.
4292 **/
4293
4294 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4295 const struct task_struct *tsk2)
4296 {
4297 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4298 }
4299
4300 /**
4301 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4302 *
4303 * Description: Prints current's name, cpuset name, and cached copy of its
4304 * mems_allowed to the kernel log.
4305 */
4306 void cpuset_print_current_mems_allowed(void)
4307 {
4308 struct cgroup *cgrp;
4309
4310 rcu_read_lock();
4311
4312 cgrp = task_cs(current)->css.cgroup;
4313 pr_cont(",cpuset=");
4314 pr_cont_cgroup_name(cgrp);
4315 pr_cont(",mems_allowed=%*pbl",
4316 nodemask_pr_args(&current->mems_allowed));
4317
4318 rcu_read_unlock();
4319 }
4320
4321 /* Display task mems_allowed in /proc/<pid>/status file. */
4322 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4323 {
4324 seq_printf(m, "Mems_allowed:\t%*pb\n",
4325 nodemask_pr_args(&task->mems_allowed));
4326 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4327 nodemask_pr_args(&task->mems_allowed));
4328 }
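/*
 * Example of the resulting /proc/<pid>/status lines (values illustrative,
 * width depends on MAX_NUMNODES):
 *
 *	Mems_allowed:		00000000,00000003
 *	Mems_allowed_list:	0-1
 */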
4329