1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * kernel/cpuset.c
4 *
5 * Processor and Memory placement constraints for sets of tasks.
6 *
7 * Copyright (C) 2003 BULL SA.
8 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
9 * Copyright (C) 2006 Google, Inc
10 *
11 * Portions derived from Patrick Mochel's sysfs code.
12 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 *
14 * 2003-10-10 Written by Simon Derr.
15 * 2003-10-22 Updates by Stephen Hemminger.
16 * 2004 May-July Rework by Paul Jackson.
17 * 2006 Rework by Paul Menage to use generic cgroups
18 * 2008 Rework of the scheduler domains and CPU hotplug handling
19 * by Max Krasnyansky
20 */
21 #include "cpuset-internal.h"
22
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mm.h>
28 #include <linux/memory.h>
29 #include <linux/rcupdate.h>
30 #include <linux/sched.h>
31 #include <linux/sched/deadline.h>
32 #include <linux/sched/mm.h>
33 #include <linux/sched/task.h>
34 #include <linux/security.h>
35 #include <linux/oom.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/task_work.h>
40
41 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
42 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
43
44 /*
45 * There can be abnormal cpuset configurations for cpu or memory
46 * node binding; this key provides a quick, low-cost check for
47 * that situation.
48 */
49 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
50
51 static const char * const perr_strings[] = {
52 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
53 [PERR_INVPARENT] = "Parent is an invalid partition root",
54 [PERR_NOTPART] = "Parent is not a partition root",
55 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
56 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
57 [PERR_HOTPLUG] = "No cpu available due to hotplug",
58 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
59 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
60 [PERR_ACCESS] = "Enable partition not permitted",
61 [PERR_REMOTE] = "Have remote partition underneath",
62 };
63
64 /*
65 * CPUSET Locking Convention
66 * -------------------------
67 *
68 * Below are the four global/local locks guarding cpuset structures in lock
69 * acquisition order:
70 * - cpuset_top_mutex
71 * - cpu_hotplug_lock (cpus_read_lock/cpus_write_lock)
72 * - cpuset_mutex
73 * - callback_lock (raw spinlock)
74 *
75 * As cpuset will now indirectly flush a number of different workqueues in
76 * housekeeping_update() to update housekeeping cpumasks when the set of
77 * isolated CPUs is going to be changed, it may be vulnerable to deadlock
78 * if we hold cpus_read_lock while calling into housekeeping_update().
79 *
80 * The first lock, cpuset_top_mutex, will be held except when calling into
81 * cpuset_handle_hotplug() from the CPU hotplug code where cpus_write_lock
82 * and cpuset_mutex will be held instead. The main purpose of this mutex
83 * is to prevent regular cpuset control file write actions from interfering
84 * with the call to housekeeping_update(), though CPU hotplug operations can
85 * still happen in parallel. This mutex also provides protection for some
86 * internal variables.
87 *
88 * A task must hold all three remaining locks to modify externally visible
89 * or used fields of cpusets, though some of the internally used cpuset fields
90 * and internal variables can be modified without holding callback_lock. If
91 * only reliable read access to the externally used fields is needed, a task
92 * can hold either cpuset_mutex or callback_lock, both of which are exposed
93 * to other external subsystems.
94 *
95 * If a task holds cpu_hotplug_lock and cpuset_mutex, it blocks others,
96 * ensuring that it is the only task able to also acquire callback_lock and
97 * be able to modify cpusets. It can perform various checks on the cpuset
98 * structure first, knowing nothing will change. It can also allocate memory
99 * without holding callback_lock. While it is performing these checks, various
100 * callback routines can briefly acquire callback_lock to query cpusets. Once
101 * it is ready to make the changes, it takes callback_lock, blocking everyone
102 * else.
103 *
104 * Calls to the kernel memory allocator cannot be made while holding
105 * callback_lock which is a spinlock, as the memory allocator may sleep or
106 * call back into cpuset code and acquire callback_lock.
107 *
108 * The task_struct fields mems_allowed and mempolicy may be changed by
109 * another task, so we use alloc_lock in the task_struct to protect
110 * them.
111 *
112 * The cpuset_common_seq_show() handlers only hold callback_lock across
113 * small pieces of code, such as when reading out possibly multi-word
114 * cpumasks and nodemasks.
115 */
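
/*
 * Illustrative sketch of the locking rules above, assuming a hypothetical
 * caller that reads and then updates an externally visible cpumask of a
 * cpuset ("snapshot" and "new_value" are made-up names for illustration):
 *
 *	// Reliable read: either cpuset_mutex or callback_lock is enough.
 *	cpuset_callback_lock_irq();
 *	cpumask_copy(snapshot, cs->effective_cpus);
 *	cpuset_callback_unlock_irq();
 *
 *	// Modification: hold cpu_hotplug_lock + cpuset_mutex for the checks,
 *	// then take callback_lock only around the actual update.
 *	cpuset_full_lock();
 *	// ... validate and allocate memory (no callback_lock held) ...
 *	spin_lock_irq(&callback_lock);
 *	cpumask_copy(cs->effective_cpus, new_value);
 *	spin_unlock_irq(&callback_lock);
 *	cpuset_full_unlock();
 */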
116
117 static DEFINE_MUTEX(cpuset_top_mutex);
118 static DEFINE_MUTEX(cpuset_mutex);
119
120 /*
121 * File level internal variables below follow one of the following exclusion
122 * rules.
123 *
124 * RWCS: Read/write-able by holding either cpus_write_lock (and optionally
125 * cpuset_mutex) or both cpus_read_lock and cpuset_mutex.
126 *
127 * CSCB: Readable by holding either cpuset_mutex or callback_lock. Writable
128 * by holding both cpuset_mutex and callback_lock.
129 *
130 * T: Read/write-able by holding the cpuset_top_mutex.
131 */
132
133 /*
134 * For local partitions, update to subpartitions_cpus & isolated_cpus is done
135 * in update_parent_effective_cpumask(). For remote partitions, it is done in
136 * the remote_partition_*() and remote_cpus_update() helpers.
137 */
138 /*
139 * Exclusive CPUs distributed out to local or remote sub-partitions of
140 * top_cpuset
141 */
142 static cpumask_var_t subpartitions_cpus; /* RWCS */
143
144 /*
145 * Exclusive CPUs in isolated partitions (shown in cpuset.cpus.isolated)
146 */
147 static cpumask_var_t isolated_cpus; /* CSCB */
148
149 /*
150 * Set if housekeeping cpumasks are to be updated.
151 */
152 static bool update_housekeeping; /* RWCS */
153
154 /*
155 * Copy of isolated_cpus to be passed to housekeeping_update()
156 */
157 static cpumask_var_t isolated_hk_cpus; /* T */
158
159 /*
160 * A flag to force sched domain rebuild at the end of an operation.
161 * It can be set in
162 * - update_partition_sd_lb()
163 * - update_cpumasks_hier()
164 * - cpuset_update_flag()
165 * - cpuset_hotplug_update_tasks()
166 * - cpuset_handle_hotplug()
167 *
168 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
169 *
170 * Note that update_relax_domain_level() in cpuset-v1.c can still call
171 * rebuild_sched_domains_locked() directly without using this flag.
172 */
173 static bool force_sd_rebuild; /* RWCS */
174
175 /*
176 * Partition root states:
177 *
178 * 0 - member (not a partition root)
179 * 1 - partition root
180 * 2 - partition root without load balancing (isolated)
181 * -1 - invalid partition root
182 * -2 - invalid isolated partition root
183 *
184 * There are 2 types of partitions - local or remote. Local partitions are
185 * those whose parents are themselves partition roots. Setting
186 * cpuset.cpus.exclusive is optional when setting up a local partition.
187 * Remote partitions are those whose parents are not partition roots. Passing
188 * down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
189 * nodes is mandatory when creating a remote partition.
190 *
191 * For simplicity, a local partition can be created under a local or remote
192 * partition but a remote partition cannot have any partition root in its
193 * ancestor chain except the cgroup root.
194 *
195 * A valid partition can be formed by setting exclusive_cpus or cpus_allowed
196 * if exclusive_cpus is not set. In the case of a partition with empty
197 * exclusive_cpus, all the conflicting exclusive CPUs specified in the
198 * following cpumasks of sibling cpusets will be removed from its
199 * cpus_allowed in determining its effective_xcpus.
200 * - effective_xcpus
201 * - exclusive_cpus
202 *
203 * The "cpuset.cpus.exclusive" control file should be used for setting up
204 * a partition if the users want to get as many CPUs as possible.
205 */
206 #define PRS_MEMBER 0
207 #define PRS_ROOT 1
208 #define PRS_ISOLATED 2
209 #define PRS_INVALID_ROOT -1
210 #define PRS_INVALID_ISOLATED -2
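
/*
 * Illustrative sketch (not code used by this file): how the PRS_* values
 * above typically map to the strings reported by the cpuset.cpus.partition
 * control file (invalid states may be followed by a reason string);
 * prs_name() is a hypothetical helper shown only for illustration.
 *
 *	static const char *prs_name(int prs)
 *	{
 *		switch (prs) {
 *		case PRS_ROOT:			return "root";
 *		case PRS_ISOLATED:		return "isolated";
 *		case PRS_INVALID_ROOT:		return "root invalid";
 *		case PRS_INVALID_ISOLATED:	return "isolated invalid";
 *		default:			return "member";
 *		}
 *	}
 *
 * Note that make_partition_invalid() below relies on the encoding
 * PRS_INVALID_* == -PRS_*.
 */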
211
212 /*
213 * Temporary cpumasks for working with partitions that are passed among
214 * functions to avoid memory allocation in inner functions.
215 */
216 struct tmpmasks {
217 cpumask_var_t addmask, delmask; /* For partition root */
218 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
219 };
220
221 void inc_dl_tasks_cs(struct task_struct *p)
222 {
223 struct cpuset *cs = task_cs(p);
224
225 cs->nr_deadline_tasks++;
226 }
227
228 void dec_dl_tasks_cs(struct task_struct *p)
229 {
230 struct cpuset *cs = task_cs(p);
231
232 cs->nr_deadline_tasks--;
233 }
234
235 static inline bool is_partition_valid(const struct cpuset *cs)
236 {
237 return cs->partition_root_state > 0;
238 }
239
240 static inline bool is_partition_invalid(const struct cpuset *cs)
241 {
242 return cs->partition_root_state < 0;
243 }
244
245 static inline bool cs_is_member(const struct cpuset *cs)
246 {
247 return cs->partition_root_state == PRS_MEMBER;
248 }
249
250 /*
251 * Callers should hold callback_lock to modify partition_root_state.
252 */
253 static inline void make_partition_invalid(struct cpuset *cs)
254 {
255 if (cs->partition_root_state > 0)
256 cs->partition_root_state = -cs->partition_root_state;
257 }
258
259 /*
260 * Send a notification event whenever partition_root_state changes.
261 */
262 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
263 {
264 if (old_prs == cs->partition_root_state)
265 return;
266 cgroup_file_notify(&cs->partition_file);
267
268 /* Reset prs_err if not invalid */
269 if (is_partition_valid(cs))
270 WRITE_ONCE(cs->prs_err, PERR_NONE);
271 }
272
273 /*
274 * The top_cpuset is always synchronized to cpu_active_mask and we should avoid
275 * using cpu_online_mask as much as possible. An active CPU is always an online
276 * CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
277 * during hotplug operations. A CPU is marked active at the last stage of CPU
278 * bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
279 * will be called to update the sched domains so that the scheduler can move
280 * a normal task to a newly active CPU or remove tasks away from a newly
281 * inactivated CPU. The online bit is set much earlier in the CPU bringup
282 * process and cleared much later in CPU teardown.
283 *
284 * If cpu_online_mask is used while a hotunplug operation is happening in
285 * parallel, we may leave an offline CPU in cpus_allowed or some other masks.
286 */
287 struct cpuset top_cpuset = {
288 .flags = BIT(CS_CPU_EXCLUSIVE) |
289 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
290 .partition_root_state = PRS_ROOT,
291 .dl_bw_cpu = -1,
292 };
293
294 /**
295 * cpuset_lock - Acquire the global cpuset mutex
296 *
297 * This locks the global cpuset mutex to prevent modifications to the cpuset
298 * hierarchy and configurations. This helper alone is not sufficient to make modifications.
299 */
300 void cpuset_lock(void)
301 {
302 mutex_lock(&cpuset_mutex);
303 }
304
305 void cpuset_unlock(void)
306 {
307 mutex_unlock(&cpuset_mutex);
308 }
309
310 void lockdep_assert_cpuset_lock_held(void)
311 {
312 lockdep_assert_held(&cpuset_mutex);
313 }
314
315 /**
316 * cpuset_full_lock - Acquire full protection for cpuset modification
317 *
318 * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
319 * to safely modify cpuset data.
320 */
321 void cpuset_full_lock(void)
322 {
323 mutex_lock(&cpuset_top_mutex);
324 cpus_read_lock();
325 mutex_lock(&cpuset_mutex);
326 }
327
328 void cpuset_full_unlock(void)
329 {
330 mutex_unlock(&cpuset_mutex);
331 cpus_read_unlock();
332 mutex_unlock(&cpuset_top_mutex);
333 }
334
335 #ifdef CONFIG_LOCKDEP
336 bool lockdep_is_cpuset_held(void)
337 {
338 return lockdep_is_held(&cpuset_mutex) ||
339 lockdep_is_held(&cpuset_top_mutex);
340 }
341 #endif
342
343 static DEFINE_SPINLOCK(callback_lock);
344
345 void cpuset_callback_lock_irq(void)
346 {
347 spin_lock_irq(&callback_lock);
348 }
349
350 void cpuset_callback_unlock_irq(void)
351 {
352 spin_unlock_irq(&callback_lock);
353 }
354
355 static struct workqueue_struct *cpuset_migrate_mm_wq;
356
357 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
358
359 static inline void check_insane_mems_config(nodemask_t *nodes)
360 {
361 if (!cpusets_insane_config() &&
362 movable_only_nodes(nodes)) {
363 static_branch_enable_cpuslocked(&cpusets_insane_config_key);
364 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
365 "Cpuset allocations might fail even with a lot of memory available.\n",
366 nodemask_pr_args(nodes));
367 }
368 }
369
370 /*
371 * Decrease cs->attach_in_progress.
372 * Wake up cpuset_attach_wq if cs->attach_in_progress reaches 0.
373 */
374 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
375 {
376 lockdep_assert_cpuset_lock_held();
377
378 cs->attach_in_progress--;
379 if (!cs->attach_in_progress)
380 wake_up(&cpuset_attach_wq);
381 }
382
383 static inline void dec_attach_in_progress(struct cpuset *cs)
384 {
385 mutex_lock(&cpuset_mutex);
386 dec_attach_in_progress_locked(cs);
387 mutex_unlock(&cpuset_mutex);
388 }
389
390 static inline bool cpuset_v2(void)
391 {
392 return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
393 cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
394 }
395
396 /*
397 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
398 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
399 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
400 * With v2 behavior, "cpus" and "mems" are always what the users have
401 * requested and won't be changed by hotplug events. Only the effective
402 * cpus or mems will be affected.
403 */
404 static inline bool is_in_v2_mode(void)
405 {
406 return cpuset_v2() ||
407 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
408 }
409
410 /**
411 * partition_is_populated - check if partition has tasks
412 * @cs: partition root to be checked
413 * @excluded_child: a child cpuset to be excluded in task checking
414 * Return: true if there are tasks, false otherwise
415 *
416 * @cs should be a valid partition root or going to become a partition root.
417 * @excluded_child should be non-NULL when this cpuset is going to become a
418 * partition itself.
419 *
420 * Note that a remote partition is not allowed underneath a valid local
421 * or remote partition. So if a non-partition root child is populated,
422 * the whole partition is considered populated.
423 */
424 static inline bool partition_is_populated(struct cpuset *cs,
425 struct cpuset *excluded_child)
426 {
427 struct cpuset *cp;
428 struct cgroup_subsys_state *pos_css;
429
430 /*
431 * We cannot call cs_is_populated(cs) directly, as
432 * nr_populated_domain_children may include populated
433 * csets from descendants that are partitions.
434 */
435 if (cs->css.cgroup->nr_populated_csets ||
436 cs->attach_in_progress)
437 return true;
438
439 rcu_read_lock();
440 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
441 if (cp == cs || cp == excluded_child)
442 continue;
443
444 if (is_partition_valid(cp)) {
445 pos_css = css_rightmost_descendant(pos_css);
446 continue;
447 }
448
449 if (cpuset_is_populated(cp)) {
450 rcu_read_unlock();
451 return true;
452 }
453 }
454 rcu_read_unlock();
455 return false;
456 }
457
458 /*
459 * Return in pmask the portion of a task's cpuset's cpus_allowed that
460 * are online and are capable of running the task. If none are found,
461 * walk up the cpuset hierarchy until we find one that does have some
462 * appropriate cpus.
463 *
464 * One way or another, we guarantee to return some non-empty subset
465 * of cpu_active_mask.
466 *
467 * Call with callback_lock or cpuset_mutex held.
468 */
469 static void guarantee_active_cpus(struct task_struct *tsk,
470 struct cpumask *pmask)
471 {
472 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
473 struct cpuset *cs;
474
475 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
476 cpumask_copy(pmask, cpu_active_mask);
477
478 rcu_read_lock();
479 cs = task_cs(tsk);
480
481 while (!cpumask_intersects(cs->effective_cpus, pmask))
482 cs = parent_cs(cs);
483
484 cpumask_and(pmask, pmask, cs->effective_cpus);
485 rcu_read_unlock();
486 }
487
488 /*
489 * Return in *pmask the portion of a cpuset's mems_allowed that
490 * are online, with memory. If none are online with memory, walk
491 * up the cpuset hierarchy until we find one that does have some
492 * online mems. The top cpuset always has some mems online.
493 *
494 * One way or another, we guarantee to return some non-empty subset
495 * of node_states[N_MEMORY].
496 *
497 * Call with callback_lock or cpuset_mutex held.
498 */
499 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
500 {
501 while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
502 cs = parent_cs(cs);
503 }
504
505 /**
506 * alloc_cpumasks - Allocate an array of cpumask variables
507 * @pmasks: Pointer to array of cpumask_var_t pointers
508 * @size: Number of cpumasks to allocate
509 * Return: 0 if successful, -ENOMEM otherwise.
510 *
511 * Allocates @size cpumasks and initializes them to empty. Returns 0 on
512 * success, -ENOMEM on allocation failure. On failure, any previously
513 * allocated cpumasks are freed.
514 */
515 static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
516 {
517 int i;
518
519 for (i = 0; i < size; i++) {
520 if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
521 while (--i >= 0)
522 free_cpumask_var(*pmasks[i]);
523 return -ENOMEM;
524 }
525 }
526 return 0;
527 }
528
529 /**
530 * alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
531 * @tmp: Pointer to tmpmasks structure to populate
532 * Return: 0 on success, -ENOMEM on allocation failure
533 */
534 static inline int alloc_tmpmasks(struct tmpmasks *tmp)
535 {
536 /*
537 * Array of pointers to the three cpumask_var_t fields in tmpmasks.
538 * Note: Array size must match actual number of masks (3)
539 */
540 cpumask_var_t *pmask[3] = {
541 &tmp->new_cpus,
542 &tmp->addmask,
543 &tmp->delmask
544 };
545
546 return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
547 }
548
549 /**
550 * free_tmpmasks - free cpumasks in a tmpmasks structure
551 * @tmp: the tmpmasks structure pointer
552 */
553 static inline void free_tmpmasks(struct tmpmasks *tmp)
554 {
555 if (!tmp)
556 return;
557
558 free_cpumask_var(tmp->new_cpus);
559 free_cpumask_var(tmp->addmask);
560 free_cpumask_var(tmp->delmask);
561 }
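
/*
 * Typical usage sketch for the two helpers above, assuming a hypothetical
 * update path (error handling kept minimal for illustration):
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_tmpmasks(&tmp))
 *		return -ENOMEM;
 *	// ... use tmp.new_cpus / tmp.addmask / tmp.delmask ...
 *	free_tmpmasks(&tmp);
 */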
562
563 /**
564 * dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
565 * @cs: Source cpuset to duplicate (NULL for a fresh allocation)
566 *
567 * Creates a new cpuset by either:
568 * 1. Duplicating an existing cpuset (if @cs is non-NULL), or
569 * 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
570 *
571 * Return: Pointer to newly allocated cpuset on success, NULL on failure
572 */
573 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
574 {
575 struct cpuset *trial;
576
577 /* Allocate base structure */
578 trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
579 kzalloc_obj(*cs);
580 if (!trial)
581 return NULL;
582
583 trial->dl_bw_cpu = -1;
584
585 /* Setup cpumask pointer array */
586 cpumask_var_t *pmask[4] = {
587 &trial->cpus_allowed,
588 &trial->effective_cpus,
589 &trial->effective_xcpus,
590 &trial->exclusive_cpus
591 };
592
593 if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
594 kfree(trial);
595 return NULL;
596 }
597
598 /* Copy masks if duplicating */
599 if (cs) {
600 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
601 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
602 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
603 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
604 }
605
606 return trial;
607 }
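
/*
 * Typical usage sketch (hypothetical caller): duplicate a cpuset, apply the
 * proposed change to the copy, validate it, then discard the trial copy.
 * "requested_cpus" and "err" below are made-up names for illustration.
 *
 *	struct cpuset *trial = dup_or_alloc_cpuset(cs);
 *
 *	if (!trial)
 *		return -ENOMEM;
 *	cpumask_copy(trial->cpus_allowed, requested_cpus);
 *	err = validate_change(cs, trial);
 *	free_cpuset(trial);
 */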
608
609 /**
610 * free_cpuset - free the cpuset
611 * @cs: the cpuset to be freed
612 */
613 static inline void free_cpuset(struct cpuset *cs)
614 {
615 free_cpumask_var(cs->cpus_allowed);
616 free_cpumask_var(cs->effective_cpus);
617 free_cpumask_var(cs->effective_xcpus);
618 free_cpumask_var(cs->exclusive_cpus);
619 kfree(cs);
620 }
621
622 /* Return user specified exclusive CPUs */
623 static inline struct cpumask *user_xcpus(struct cpuset *cs)
624 {
625 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
626 : cs->exclusive_cpus;
627 }
628
629 static inline bool xcpus_empty(struct cpuset *cs)
630 {
631 return cpumask_empty(cs->cpus_allowed) &&
632 cpumask_empty(cs->exclusive_cpus);
633 }
634
635 /*
636 * cpusets_are_exclusive() - check if two cpusets are exclusive
637 *
638 * Return true if exclusive, false if not
639 */
640 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
641 {
642 struct cpumask *xcpus1 = user_xcpus(cs1);
643 struct cpumask *xcpus2 = user_xcpus(cs2);
644
645 if (cpumask_intersects(xcpus1, xcpus2))
646 return false;
647 return true;
648 }
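
/*
 * Worked example (made-up masks): with cs1->exclusive_cpus = 0-3 and
 * cs2->cpus_allowed = 2-5 (cs2->exclusive_cpus empty), user_xcpus() yields
 * 0-3 and 2-5 respectively; they intersect at 2-3, so the two cpusets are
 * not exclusive. With cs2->cpus_allowed = 4-7 they would be exclusive.
 */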
649
650 /**
651 * cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
652 * @trial: the trial cpuset to be checked
653 * @sibling: a sibling cpuset to be checked against
654 * @xcpus_changed: set if exclusive_cpus has been changed
655 *
656 * Returns: true if CPU exclusivity conflict exists, false otherwise
657 *
658 * Conflict detection rules:
659 * o cgroup v1
660 * See cpuset1_cpus_excl_conflict()
661 * o cgroup v2
662 * - The exclusive_cpus values cannot overlap.
663 * - New exclusive_cpus cannot be a superset of a sibling's cpus_allowed.
664 */
665 static inline bool cpus_excl_conflict(struct cpuset *trial, struct cpuset *sibling,
666 bool xcpus_changed)
667 {
668 if (!cpuset_v2())
669 return cpuset1_cpus_excl_conflict(trial, sibling);
670
671 /* The cpus_allowed of a sibling cpuset cannot be a subset of the new exclusive_cpus */
672 if (xcpus_changed && !cpumask_empty(sibling->cpus_allowed) &&
673 cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
674 return true;
675
676 /* Exclusive_cpus cannot intersect */
677 return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus);
678 }
679
680 static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
681 {
682 if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
683 return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
684 return false;
685 }
686
687 /*
688 * validate_change() - Used to validate that any proposed cpuset change
689 * follows the structural rules for cpusets.
690 *
691 * If we replaced the flag and mask values of the current cpuset
692 * (cur) with those values in the trial cpuset (trial), would
693 * our various subset and exclusive rules still be valid? Presumes
694 * cpuset_mutex held.
695 *
696 * 'cur' is the address of an actual, in-use cpuset. Operations
697 * such as list traversal that depend on the actual address of the
698 * cpuset in the list must use cur below, not trial.
699 *
700 * 'trial' is the address of bulk structure copy of cur, with
701 * perhaps one or more of the fields cpus_allowed, mems_allowed,
702 * or flags changed to new, trial values.
703 *
704 * Return 0 if valid, -errno if not.
705 */
706
707 static int validate_change(struct cpuset *cur, struct cpuset *trial)
708 {
709 struct cgroup_subsys_state *css;
710 struct cpuset *c, *par;
711 bool xcpus_changed;
712 int ret = 0;
713
714 rcu_read_lock();
715
716 if (!is_in_v2_mode())
717 ret = cpuset1_validate_change(cur, trial);
718 if (ret)
719 goto out;
720
721 /* Remaining checks don't apply to root cpuset */
722 if (cur == &top_cpuset)
723 goto out;
724
725 par = parent_cs(cur);
726
727 /*
728 * We can't shrink if we won't have enough room for SCHED_DEADLINE
729 * tasks. This check is not done when scheduling is disabled as the
730 * users should know what they are doing.
731 *
732 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
733 * cpus_allowed.
734 *
735 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
736 * for non-isolated partition root. At this point, the target
737 * effective_cpus isn't computed yet. user_xcpus() is the best
738 * approximation.
739 *
740 * TBD: May need to precompute the real effective_cpus here in case
741 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
742 * becomes an issue.
743 */
744 ret = -EBUSY;
745 if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
746 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
747 goto out;
748
749 /*
750 * If either I or some sibling (!= me) is exclusive, we can't
751 * overlap. exclusive_cpus cannot overlap with each other if set.
752 */
753 ret = -EINVAL;
754 xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
755 cpuset_for_each_child(c, css, par) {
756 if (c == cur)
757 continue;
758 if (cpus_excl_conflict(trial, c, xcpus_changed))
759 goto out;
760 if (mems_excl_conflict(trial, c))
761 goto out;
762 }
763
764 ret = 0;
765 out:
766 rcu_read_unlock();
767 return ret;
768 }
769
770 #ifdef CONFIG_SMP
771
772 /*
773 * generate_sched_domains()
774 *
775 * This function builds a partial partition of the system's CPUs.
776 * A 'partial partition' is a set of non-overlapping subsets whose
777 * union is a subset of that set.
778 * The output of this function needs to be passed to the kernel/sched/core.c
779 * partition_sched_domains() routine, which will rebuild the scheduler's
780 * load balancing domains (sched domains) as specified by that partial
781 * partition.
782 *
783 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
784 * for a background explanation of this.
785 *
786 * Does not return errors, on the theory that the callers of this
787 * routine would rather not worry about failures to rebuild sched
788 * domains when operating in the severe memory shortage situations
789 * that could cause allocation failures below.
790 *
791 * Must be called with cpuset_mutex held.
792 *
793 * The three key local variables below are:
794 * cp - cpuset pointer, used (together with pos_css) to perform a
795 * top-down scan of all cpusets. For our purposes, rebuilding
796 * the scheduler's sched domains, we can ignore !is_sched_load_
797 * balance cpusets.
798 * csa - (for CpuSet Array) Array of pointers to all the cpusets
799 * that need to be load balanced, for convenient iterative
800 * access by the subsequent code that finds the best partition,
801 * i.e. the set of domains (subsets) of CPUs such that the
802 * cpus_allowed of every cpuset marked is_sched_load_balance
803 * is a subset of one of these domains, while there are as
804 * many such domains as possible, each as small as possible.
805 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
806 * the kernel/sched/core.c routine partition_sched_domains() in a
807 * convenient format, that can be easily compared to the prior
808 * value to determine what partition elements (sched domains)
809 * were changed (added or removed.)
810 */
811 static int generate_sched_domains(cpumask_var_t **domains,
812 struct sched_domain_attr **attributes)
813 {
814 struct cpuset *cp; /* top-down scan of cpusets */
815 struct cpuset **csa; /* array of all cpuset ptrs */
816 int i, j; /* indices for partition finding loops */
817 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
818 struct sched_domain_attr *dattr; /* attributes for custom domains */
819 int ndoms = 0; /* number of sched domains in result */
820 struct cgroup_subsys_state *pos_css;
821
822 if (!cpuset_v2())
823 return cpuset1_generate_sched_domains(domains, attributes);
824
825 doms = NULL;
826 dattr = NULL;
827 csa = NULL;
828
829 /* Special case for the 99% of systems with one, full, sched domain */
830 if (cpumask_empty(subpartitions_cpus)) {
831 ndoms = 1;
832 /* !csa will be checked and can be correctly handled */
833 goto generate_doms;
834 }
835
836 csa = kmalloc_objs(cp, nr_cpusets());
837 if (!csa)
838 goto done;
839
840 /* Find how many partitions and cache them to csa[] */
841 rcu_read_lock();
842 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
843 /*
844 * Only valid partition roots that are not isolated and with
845 * non-empty effective_cpus will be saved into csa[].
846 */
847 if ((cp->partition_root_state == PRS_ROOT) &&
848 !cpumask_empty(cp->effective_cpus))
849 csa[ndoms++] = cp;
850
851 /*
852 * Skip @cp's subtree if not a partition root and has no
853 * exclusive CPUs to be granted to child cpusets.
854 */
855 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
856 pos_css = css_rightmost_descendant(pos_css);
857 }
858 rcu_read_unlock();
859
860 for (i = 0; i < ndoms; i++) {
861 for (j = i + 1; j < ndoms; j++) {
862 if (cpusets_overlap(csa[i], csa[j]))
863 /*
864 * Cgroup v2 shouldn't pass down overlapping
865 * partition root cpusets.
866 */
867 WARN_ON_ONCE(1);
868 }
869 }
870
871 generate_doms:
872 doms = alloc_sched_domains(ndoms);
873 if (!doms)
874 goto done;
875
876 /*
877 * The rest of the code, including the scheduler, can deal with
878 * dattr==NULL case. No need to abort if alloc fails.
879 */
880 dattr = kmalloc_objs(struct sched_domain_attr, ndoms);
881
882 /*
883 * Cgroup v2 doesn't support domain attributes, just set all of them
884 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
885 * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs.
886 */
887 for (i = 0; i < ndoms; i++) {
888 /*
889 * The top cpuset may contain some boot time isolated
890 * CPUs that need to be excluded from the sched domain.
891 */
892 if (!csa || csa[i] == &top_cpuset)
893 cpumask_and(doms[i], top_cpuset.effective_cpus,
894 housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
895 else
896 cpumask_copy(doms[i], csa[i]->effective_cpus);
897 if (dattr)
898 dattr[i] = SD_ATTR_INIT;
899 }
900
901 done:
902 kfree(csa);
903
904 /*
905 * Fallback to the default domain if kmalloc() failed.
906 * See comments in partition_sched_domains().
907 */
908 if (doms == NULL)
909 ndoms = 1;
910
911 *domains = doms;
912 *attributes = dattr;
913 return ndoms;
914 }
915
916 static void dl_update_tasks_root_domain(struct cpuset *cs)
917 {
918 struct css_task_iter it;
919 struct task_struct *task;
920
921 if (cs->nr_deadline_tasks == 0)
922 return;
923
924 css_task_iter_start(&cs->css, 0, &it);
925
926 while ((task = css_task_iter_next(&it)))
927 dl_add_task_root_domain(task);
928
929 css_task_iter_end(&it);
930 }
931
932 void dl_rebuild_rd_accounting(void)
933 {
934 struct cpuset *cs = NULL;
935 struct cgroup_subsys_state *pos_css;
936 int cpu;
937 u64 cookie = ++dl_cookie;
938
939 lockdep_assert_cpuset_lock_held();
940 lockdep_assert_cpus_held();
941 lockdep_assert_held(&sched_domains_mutex);
942
943 rcu_read_lock();
944
945 for_each_possible_cpu(cpu) {
946 if (dl_bw_visited(cpu, cookie))
947 continue;
948
949 dl_clear_root_domain_cpu(cpu);
950 }
951
952 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
953
954 if (cpumask_empty(cs->effective_cpus)) {
955 pos_css = css_rightmost_descendant(pos_css);
956 continue;
957 }
958
959 css_get(&cs->css);
960
961 rcu_read_unlock();
962
963 dl_update_tasks_root_domain(cs);
964
965 rcu_read_lock();
966 css_put(&cs->css);
967 }
968 rcu_read_unlock();
969 }
970
971 /*
972 * Rebuild scheduler domains.
973 *
974 * If the flag 'sched_load_balance' of any cpuset with non-empty
975 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
976 * which has that flag enabled, or if any cpuset with a non-empty
977 * 'cpus' is removed, then call this routine to rebuild the
978 * scheduler's dynamic sched domains.
979 *
980 * Call with cpuset_mutex held. Takes cpus_read_lock().
981 */
982 void rebuild_sched_domains_locked(void)
983 {
984 struct sched_domain_attr *attr;
985 cpumask_var_t *doms;
986 int ndoms;
987 int i;
988
989 lockdep_assert_cpus_held();
990 lockdep_assert_cpuset_lock_held();
991 force_sd_rebuild = false;
992
993 /* Generate domain masks and attrs */
994 ndoms = generate_sched_domains(&doms, &attr);
995
996 /*
997 * cpuset_hotplug_workfn is invoked synchronously now, thus this
998 * function should not race with CPU hotplug. And the effective CPUs
999 * must not include any offline CPUs. Passing an offline CPU in the
1000 * doms to partition_sched_domains() will trigger a kernel panic.
1001 *
1002 * We perform a final check here: if the doms contains any
1003 * offline CPUs, a warning is emitted and we return directly to
1004 * prevent the panic.
1005 */
1006 for (i = 0; doms && i < ndoms; i++) {
1007 if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
1008 return;
1009 }
1010
1011 /* Have scheduler rebuild the domains */
1012 partition_sched_domains(ndoms, doms, attr);
1013 }
1014 #else /* !CONFIG_SMP */
1015 void rebuild_sched_domains_locked(void)
1016 {
1017 }
1018 #endif /* CONFIG_SMP */
1019
1020 static void rebuild_sched_domains_cpuslocked(void)
1021 {
1022 mutex_lock(&cpuset_mutex);
1023 rebuild_sched_domains_locked();
1024 mutex_unlock(&cpuset_mutex);
1025 }
1026
1027 void rebuild_sched_domains(void)
1028 {
1029 cpus_read_lock();
1030 rebuild_sched_domains_cpuslocked();
1031 cpus_read_unlock();
1032 }
1033
1034 void cpuset_reset_sched_domains(void)
1035 {
1036 mutex_lock(&cpuset_mutex);
1037 partition_sched_domains(1, NULL, NULL);
1038 mutex_unlock(&cpuset_mutex);
1039 }
1040
1041 /**
1042 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1043 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1044 * @new_cpus: the temp variable for the new effective_cpus mask
1045 *
1046 * Iterate through each task of @cs updating its cpus_allowed to the
1047 * effective cpuset's. As this function is called with cpuset_mutex held,
1048 * cpuset membership stays stable.
1049 *
1050 * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1051 * to make sure all offline CPUs are also included as hotplug code won't
1052 * update cpumasks for tasks in top_cpuset.
1053 *
1054 * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1055 * do cpu masking per task instead of doing it once for all.
1056 */
1057 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1058 {
1059 struct css_task_iter it;
1060 struct task_struct *task;
1061 bool top_cs = cs == &top_cpuset;
1062
1063 css_task_iter_start(&cs->css, 0, &it);
1064 while ((task = css_task_iter_next(&it))) {
1065 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1066
1067 if (top_cs) {
1068 /*
1069 * PF_KTHREAD tasks are handled by housekeeping.
1070 * PF_NO_SETAFFINITY tasks are ignored.
1071 */
1072 if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
1073 continue;
1074 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1075 } else {
1076 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1077 }
1078 set_cpus_allowed_ptr(task, new_cpus);
1079 }
1080 css_task_iter_end(&it);
1081 }
1082
1083 /**
1084 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1085 * @new_cpus: the temp variable for the new effective_cpus mask
1086 * @cs: the cpuset whose effective_cpus mask needs to be recomputed
1087 * @parent: the parent cpuset
1088 *
1089 * The result is valid only if the given cpuset isn't a partition root.
1090 */
1091 static void compute_effective_cpumask(struct cpumask *new_cpus,
1092 struct cpuset *cs, struct cpuset *parent)
1093 {
1094 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1095 }
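
/*
 * Worked example (made-up masks): with cs->cpus_allowed = 2-5 and
 * parent->effective_cpus = 0-3, compute_effective_cpumask() sets new_cpus
 * to 2-3. As noted above, the result only matters for cpusets that are not
 * partition roots; partition roots are handled elsewhere.
 */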
1096
1097 /*
1098 * Commands for update_parent_effective_cpumask
1099 */
1100 enum partition_cmd {
1101 partcmd_enable, /* Enable partition root */
1102 partcmd_enablei, /* Enable isolated partition root */
1103 partcmd_disable, /* Disable partition root */
1104 partcmd_update, /* Update parent's effective_cpus */
1105 partcmd_invalidate, /* Make partition invalid */
1106 };
1107
1108 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1109 struct tmpmasks *tmp);
1110
1111 /*
1112 * Update partition exclusive flag
1113 *
1114 * Return: 0 if successful, an error code otherwise
1115 */
1116 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1117 {
1118 bool exclusive = (new_prs > PRS_MEMBER);
1119
1120 if (exclusive && !is_cpu_exclusive(cs)) {
1121 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1122 return PERR_NOTEXCL;
1123 } else if (!exclusive && is_cpu_exclusive(cs)) {
1124 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1125 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1126 }
1127 return 0;
1128 }
1129
1130 /*
1131 * Update partition load balance flag and/or rebuild sched domain
1132 *
1133 * Changing load balance flag will automatically call
1134 * rebuild_sched_domains_locked().
1135 * This function is for cgroup v2 only.
1136 */
1137 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1138 {
1139 int new_prs = cs->partition_root_state;
1140 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1141 bool new_lb;
1142
1143 /*
1144 * If cs is not a valid partition root, the load balance state
1145 * will follow its parent.
1146 */
1147 if (new_prs > 0) {
1148 new_lb = (new_prs != PRS_ISOLATED);
1149 } else {
1150 new_lb = is_sched_load_balance(parent_cs(cs));
1151 }
1152 if (new_lb != !!is_sched_load_balance(cs)) {
1153 rebuild_domains = true;
1154 if (new_lb)
1155 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1156 else
1157 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1158 }
1159
1160 if (rebuild_domains)
1161 cpuset_force_rebuild();
1162 }
1163
1164 /*
1165 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1166 */
1167 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1168 struct cpumask *xcpus)
1169 {
1170 /*
1171 * A populated partition (cs or parent) can't have empty effective_cpus
1172 */
1173 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1174 partition_is_populated(parent, cs)) ||
1175 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1176 partition_is_populated(cs, NULL));
1177 }
1178
1179 static void reset_partition_data(struct cpuset *cs)
1180 {
1181 struct cpuset *parent = parent_cs(cs);
1182
1183 if (!cpuset_v2())
1184 return;
1185
1186 lockdep_assert_held(&callback_lock);
1187
1188 if (cpumask_empty(cs->exclusive_cpus)) {
1189 cpumask_clear(cs->effective_xcpus);
1190 if (is_cpu_exclusive(cs))
1191 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1192 }
1193 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1194 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1195 }
1196
1197 /*
1198 * isolated_cpus_update - Update the isolated_cpus mask
1199 * @old_prs: old partition_root_state
1200 * @new_prs: new partition_root_state
1201 * @xcpus: exclusive CPUs with state change
1202 */
1203 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1204 {
1205 WARN_ON_ONCE(old_prs == new_prs);
1206 lockdep_assert_held(&callback_lock);
1207 lockdep_assert_held(&cpuset_mutex);
1208 if (new_prs == PRS_ISOLATED) {
1209 if (cpumask_subset(xcpus, isolated_cpus))
1210 return;
1211 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1212 } else {
1213 if (!cpumask_intersects(xcpus, isolated_cpus))
1214 return;
1215 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1216 }
1217 update_housekeeping = true;
1218 }
1219
1220 /*
1221 * partition_xcpus_add - Add new exclusive CPUs to partition
1222 * @new_prs: new partition_root_state
1223 * @parent: parent cpuset
1224 * @xcpus: exclusive CPUs to be added
1225 *
1226 * Remote partition if parent == NULL
1227 */
1228 static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1229 struct cpumask *xcpus)
1230 {
1231 WARN_ON_ONCE(new_prs < 0);
1232 lockdep_assert_held(&callback_lock);
1233 if (!parent)
1234 parent = &top_cpuset;
1235
1236
1237 if (parent == &top_cpuset)
1238 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1239
1240 if (new_prs != parent->partition_root_state)
1241 isolated_cpus_update(parent->partition_root_state, new_prs,
1242 xcpus);
1243
1244 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1245 }
1246
1247 /*
1248 * partition_xcpus_del - Remove exclusive CPUs from partition
1249 * @old_prs: old partition_root_state
1250 * @parent: parent cpuset
1251 * @xcpus: exclusive CPUs to be removed
1252 *
1253 * Remote partition if parent == NULL
1254 */
1255 static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1256 struct cpumask *xcpus)
1257 {
1258 WARN_ON_ONCE(old_prs < 0);
1259 lockdep_assert_held(&callback_lock);
1260 if (!parent)
1261 parent = &top_cpuset;
1262
1263 if (parent == &top_cpuset)
1264 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1265
1266 if (old_prs != parent->partition_root_state)
1267 isolated_cpus_update(old_prs, parent->partition_root_state,
1268 xcpus);
1269
1270 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1271 cpumask_and(parent->effective_cpus, parent->effective_cpus, cpu_active_mask);
1272 }
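
/*
 * Worked example (made-up masks) for the two helpers above: turning a
 * cpuset into a remote isolated partition with effective_xcpus = 2-3 calls
 * partition_xcpus_add(PRS_ISOLATED, NULL, <2-3>), which adds 2-3 to
 * subpartitions_cpus and isolated_cpus and removes them from
 * top_cpuset.effective_cpus. Reverting it calls
 * partition_xcpus_del(PRS_ISOLATED, NULL, <2-3>), which undoes the above
 * (with effective_cpus restricted to cpu_active_mask).
 */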
1273
1274 /*
1275 * isolated_cpus_can_update - check for isolated & nohz_full conflicts
1276 * @add_cpus: cpu mask for cpus that are going to be isolated
1277 * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1278 * Return: false if there is conflict, true otherwise
1279 *
1280 * If nohz_full is enabled and we have isolated CPUs, their combination must
1281 * still leave housekeeping CPUs.
1282 *
1283 * TBD: Should consider merging this function into
1284 * prstate_housekeeping_conflict().
1285 */
1286 static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1287 struct cpumask *del_cpus)
1288 {
1289 cpumask_var_t full_hk_cpus;
1290 int res = true;
1291
1292 if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1293 return true;
1294
1295 if (del_cpus && cpumask_weight_and(del_cpus,
1296 housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1297 return true;
1298
1299 if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1300 return false;
1301
1302 cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1303 housekeeping_cpumask(HK_TYPE_DOMAIN));
1304 cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1305 cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1306 if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1307 res = false;
1308
1309 free_cpumask_var(full_hk_cpus);
1310 return res;
1311 }
1312
1313 /*
1314 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1315 * @prstate: partition root state to be checked
1316 * @new_cpus: cpu mask
1317 * Return: true if there is conflict, false otherwise
1318 *
1319 * CPUs outside of HK_TYPE_DOMAIN_BOOT, if defined, can only be used in an
1320 * isolated partition.
1321 */
1322 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1323 {
1324 if (!housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
1325 return false;
1326
1327 if ((prstate != PRS_ISOLATED) &&
1328 !cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
1329 return true;
1330
1331 return false;
1332 }
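
/*
 * Worked example (hypothetical boot configuration): if the system is booted
 * with something like "isolcpus=domain,6-7", CPUs 6-7 fall outside the
 * HK_TYPE_DOMAIN_BOOT housekeeping mask. A request to form a "root"
 * partition whose CPUs include CPU 6 then conflicts (returns true), while
 * the same CPUs remain acceptable for an isolated partition.
 */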
1333
1334 /*
1335 * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock
1336 *
1337 * Update housekeeping cpumasks and rebuild sched domains if necessary and
1338 * then do a cpuset_full_unlock().
1339 * This should be called at the end of a cpuset operation.
1340 */
1341 static void cpuset_update_sd_hk_unlock(void)
1342 __releases(&cpuset_mutex)
1343 __releases(&cpuset_top_mutex)
1344 {
1345 /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
1346 if (force_sd_rebuild)
1347 rebuild_sched_domains_locked();
1348
1349 if (update_housekeeping) {
1350 update_housekeeping = false;
1351 cpumask_copy(isolated_hk_cpus, isolated_cpus);
1352
1353 /*
1354 * housekeeping_update() is now called without holding
1355 * cpus_read_lock and cpuset_mutex. Only cpuset_top_mutex
1356 * is still being held for mutual exclusion.
1357 */
1358 mutex_unlock(&cpuset_mutex);
1359 cpus_read_unlock();
1360 WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus));
1361 mutex_unlock(&cpuset_top_mutex);
1362 } else {
1363 cpuset_full_unlock();
1364 }
1365 }
1366
1367 /*
1368 * Work function to invoke cpuset_update_sd_hk_unlock()
1369 */
1370 static void hk_sd_workfn(struct work_struct *work)
1371 {
1372 cpuset_full_lock();
1373 cpuset_update_sd_hk_unlock();
1374 }
1375
1376 /**
1377 * rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1378 * @parent: Parent cpuset containing all siblings
1379 * @cs: Current cpuset (will be skipped)
1380 * @excpus: exclusive effective CPU mask to modify
1381 *
1382 * This function ensures the given @excpus mask doesn't include any CPUs that
1383 * are exclusively allocated to sibling cpusets. It walks through all siblings
1384 * of @cs under @parent and removes their exclusive CPUs from @excpus.
1385 */
1386 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1387 struct cpumask *excpus)
1388 {
1389 struct cgroup_subsys_state *css;
1390 struct cpuset *sibling;
1391 int retval = 0;
1392
1393 if (cpumask_empty(excpus))
1394 return 0;
1395
1396 /*
1397 * Remove exclusive CPUs from siblings
1398 */
1399 rcu_read_lock();
1400 cpuset_for_each_child(sibling, css, parent) {
1401 struct cpumask *sibling_xcpus;
1402
1403 if (sibling == cs)
1404 continue;
1405
1406 /*
1407 * If exclusive_cpus is defined, effective_xcpus will always
1408 * be a subset. Otherwise, effective_xcpus will only be set
1409 * in a valid partition root.
1410 */
1411 sibling_xcpus = cpumask_empty(sibling->exclusive_cpus)
1412 ? sibling->effective_xcpus
1413 : sibling->exclusive_cpus;
1414
1415 if (cpumask_intersects(excpus, sibling_xcpus)) {
1416 cpumask_andnot(excpus, excpus, sibling_xcpus);
1417 retval++;
1418 }
1419 }
1420 rcu_read_unlock();
1421
1422 return retval;
1423 }
1424
1425 /*
1426 * compute_excpus - compute effective exclusive CPUs
1427 * @cs: cpuset
1428 * @excpus: effective exclusive CPUs value to be set
1429 * Return: 0 if there is no sibling conflict, > 0 otherwise
1430 *
1431 * If exclusive_cpus isn't explicitly set, we have to scan the sibling cpusets
1432 * and exclude their exclusive_cpus or effective_xcpus as well.
1433 */
1434 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1435 {
1436 struct cpuset *parent = parent_cs(cs);
1437
1438 cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1439
1440 if (!cpumask_empty(cs->exclusive_cpus))
1441 return 0;
1442
1443 return rm_siblings_excl_cpus(parent, cs, excpus);
1444 }
1445
1446 /*
1447 * compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1448 * @trialcs: The trial cpuset containing the proposed new configuration
1449 * @cs: The original cpuset that the trial configuration is based on
1450 * Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1451 *
1452 * Computes the effective_xcpus for a trial configuration. @cs is provided to represent
1453 * the real cs.
1454 */
1455 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1456 {
1457 struct cpuset *parent = parent_cs(trialcs);
1458 struct cpumask *excpus = trialcs->effective_xcpus;
1459
1460 /* If cs is a member, cpuset.cpus has no impact on excpus */
1461 if (cs_is_member(cs))
1462 cpumask_and(excpus, trialcs->exclusive_cpus,
1463 parent->effective_xcpus);
1464 else
1465 cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1466
1467 return rm_siblings_excl_cpus(parent, cs, excpus);
1468 }
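
/*
 * Worked example (made-up masks) for the exclusion logic above: if the
 * parent's effective_xcpus is 0-7, the cpuset requests cpus_allowed = 0-5
 * with exclusive_cpus unset, and a sibling already owns effective_xcpus =
 * 4-5, then compute_excpus() first intersects 0-5 with 0-7 and
 * rm_siblings_excl_cpus() then strips 4-5, leaving an effective_xcpus of
 * 0-3 and a return value > 0 to flag the sibling conflict.
 */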
1469
1470 static inline bool is_remote_partition(struct cpuset *cs)
1471 {
1472 return cs->remote_partition;
1473 }
1474
1475 static inline bool is_local_partition(struct cpuset *cs)
1476 {
1477 return is_partition_valid(cs) && !is_remote_partition(cs);
1478 }
1479
1480 /*
1481 * remote_partition_enable - Enable current cpuset as a remote partition root
1482 * @cs: the cpuset to update
1483 * @new_prs: new partition_root_state
1484 * @tmp: temporary masks
1485 * Return: 0 if successful, errcode if error
1486 *
1487 * Enable the current cpuset to become a remote partition root taking CPUs
1488 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1489 */
1490 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1491 struct tmpmasks *tmp)
1492 {
1493 /*
1494 * The user must have sysadmin privilege.
1495 */
1496 if (!capable(CAP_SYS_ADMIN))
1497 return PERR_ACCESS;
1498
1499 /*
1500 * The requested exclusive_cpus must not be allocated to other
1501 * partitions and it can't use up all the root's effective_cpus.
1502 *
1503 * The effective_xcpus mask can contain offline CPUs, but there must
1504 * be at least one online CPU present before it can be enabled.
1505 *
1506 * Note that creating a remote partition with any local partition root
1507 * above it or remote partition root underneath it is not allowed.
1508 */
1509 compute_excpus(cs, tmp->new_cpus);
1510 WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1511 if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1512 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1513 return PERR_INVCPUS;
1514 if (((new_prs == PRS_ISOLATED) &&
1515 !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1516 prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1517 return PERR_HKEEPING;
1518
1519 spin_lock_irq(&callback_lock);
1520 partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1521 cs->remote_partition = true;
1522 cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1523 spin_unlock_irq(&callback_lock);
1524 cpuset_force_rebuild();
1525 cs->prs_err = 0;
1526
1527 /*
1528 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1529 */
1530 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1531 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1532 return 0;
1533 }
1534
1535 /*
1536 * remote_partition_disable - Remove current cpuset from remote partition list
1537 * @cs: the cpuset to update
1538 * @tmp: temporary masks
1539 *
1540 * The effective_cpus is also updated.
1541 *
1542 * cpuset_mutex must be held by the caller.
1543 */
1544 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1545 {
1546 WARN_ON_ONCE(!is_remote_partition(cs));
1547 /*
1548 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
1549 * which should clear subpartitions_cpus. We should not emit a warning for this
1550 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1551 * may already be cleared when disabling the partition.
1552 */
1553 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1554 !cpumask_empty(subpartitions_cpus));
1555
1556 spin_lock_irq(&callback_lock);
1557 cs->remote_partition = false;
1558 partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1559 if (cs->prs_err)
1560 cs->partition_root_state = -cs->partition_root_state;
1561 else
1562 cs->partition_root_state = PRS_MEMBER;
1563
1564 /* effective_xcpus may need to be changed */
1565 compute_excpus(cs, cs->effective_xcpus);
1566 reset_partition_data(cs);
1567 spin_unlock_irq(&callback_lock);
1568 cpuset_force_rebuild();
1569
1570 /*
1571 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1572 */
1573 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1574 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1575 }
1576
1577 /*
1578 * remote_cpus_update - cpus_exclusive change of remote partition
1579 * @cs: the cpuset to be updated
1580 * @xcpus: the new exclusive_cpus mask, if non-NULL
1581 * @excpus: the new effective_xcpus mask
1582 * @tmp: temporary masks
1583 *
1584 * top_cpuset and subpartitions_cpus will be updated or partition can be
1585 * invalidated.
1586 */
1587 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1588 struct cpumask *excpus, struct tmpmasks *tmp)
1589 {
1590 bool adding, deleting;
1591 int prs = cs->partition_root_state;
1592
1593 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1594 return;
1595
1596 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1597
1598 if (cpumask_empty(excpus)) {
1599 cs->prs_err = PERR_CPUSEMPTY;
1600 goto invalidate;
1601 }
1602
1603 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1604 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1605
1606 /*
1607 * Addition of remote CPUs is only allowed if those CPUs are
1608 * not allocated to other partitions and there are effective_cpus
1609 * left in the top cpuset.
1610 */
1611 if (adding) {
1612 WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1613 if (!capable(CAP_SYS_ADMIN))
1614 cs->prs_err = PERR_ACCESS;
1615 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1616 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1617 cs->prs_err = PERR_NOCPUS;
1618 else if ((prs == PRS_ISOLATED) &&
1619 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1620 cs->prs_err = PERR_HKEEPING;
1621 if (cs->prs_err)
1622 goto invalidate;
1623 }
1624
1625 spin_lock_irq(&callback_lock);
1626 if (adding)
1627 partition_xcpus_add(prs, NULL, tmp->addmask);
1628 if (deleting)
1629 partition_xcpus_del(prs, NULL, tmp->delmask);
1630 /*
1631 * Need to update effective_xcpus and exclusive_cpus now as
1632 * update_sibling_cpumasks() below may iterate back to the same cs.
1633 */
1634 cpumask_copy(cs->effective_xcpus, excpus);
1635 if (xcpus)
1636 cpumask_copy(cs->exclusive_cpus, xcpus);
1637 spin_unlock_irq(&callback_lock);
1638 if (adding || deleting)
1639 cpuset_force_rebuild();
1640
1641 /*
1642 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1643 */
1644 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1645 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1646 return;
1647
1648 invalidate:
1649 remote_partition_disable(cs, tmp);
1650 }
1651
1652 /**
1653 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1654 * @cs: The cpuset that requests change in partition root state
1655 * @cmd: Partition root state change command
1656 * @newmask: Optional new cpumask for partcmd_update
1657 * @tmp: Temporary addmask and delmask
1658 * Return: 0 or a partition root state error code
1659 *
1660 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1661 * root to a partition root. The effective_xcpus (cpus_allowed if
1662 * effective_xcpus not set) mask of the given cpuset will be taken away from
1663 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1664 * in effective_xcpus can be granted, or an error code otherwise.
1665 *
1666 * For partcmd_disable, the cpuset is being transformed from a partition
1667 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1668 * given back to parent's effective_cpus. 0 will always be returned.
1669 *
1670 * For partcmd_update, if the optional newmask is specified, the cpu list is
1671 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1672 * assumed to remain the same. The cpuset should either be a valid or invalid
1673 * partition root. The partition root state may change from valid to invalid
1674 * or vice versa. An error code will be returned if transitioning from
1675 * invalid to valid violates the exclusivity rule.
1676 *
1677 * For partcmd_invalidate, the current partition will be made invalid.
1678 *
1679 * The partcmd_enable* and partcmd_disable commands are used by
1680 * update_prstate(). An error code may be returned and the caller will check
1681 * for error.
1682 *
1683 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1684 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1685 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1686 * check for error and so partition_root_state and prs_err will be updated
1687 * directly.
1688 */
1689 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1690 struct cpumask *newmask,
1691 struct tmpmasks *tmp)
1692 {
1693 struct cpuset *parent = parent_cs(cs);
1694 int adding; /* Adding cpus to parent's effective_cpus */
1695 int deleting; /* Deleting cpus from parent's effective_cpus */
1696 int old_prs, new_prs;
1697 int part_error = PERR_NONE; /* Partition error? */
1698 struct cpumask *xcpus = user_xcpus(cs);
1699 int parent_prs = parent->partition_root_state;
1700 bool nocpu;
1701
1702 lockdep_assert_cpuset_lock_held();
1703 WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */
1704
1705 /*
1706 * new_prs will only be changed for the partcmd_update and
1707 * partcmd_invalidate commands.
1708 */
1709 adding = deleting = false;
1710 old_prs = new_prs = cs->partition_root_state;
1711
1712 if (cmd == partcmd_invalidate) {
1713 if (is_partition_invalid(cs))
1714 return 0;
1715
1716 /*
1717 * Make the current partition invalid.
1718 */
1719 if (is_partition_valid(parent))
1720 adding = cpumask_and(tmp->addmask,
1721 cs->effective_xcpus,
1722 parent->effective_xcpus);
1723 if (old_prs > 0)
1724 new_prs = -old_prs;
1725
1726 goto write_error;
1727 }
1728
1729 /*
1730 * The parent must be a partition root.
1731 * The new cpumask, if present, or the current cpus_allowed must
1732 * not be empty.
1733 */
1734 if (!is_partition_valid(parent)) {
1735 return is_partition_invalid(parent)
1736 ? PERR_INVPARENT : PERR_NOTPART;
1737 }
1738 if (!newmask && xcpus_empty(cs))
1739 return PERR_CPUSEMPTY;
1740
1741 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1742
1743 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1744 /*
1745 * Need to call compute_excpus() in case
1746 * exclusive_cpus is not set. A sibling conflict should only happen
1747 * if exclusive_cpus isn't set.
1748 */
1749 xcpus = tmp->delmask;
1750 if (compute_excpus(cs, xcpus))
1751 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1752 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1753
1754 /*
1755 * Enabling partition root is not allowed if its
1756 * effective_xcpus is empty.
1757 */
1758 if (cpumask_empty(xcpus))
1759 return PERR_INVCPUS;
1760
1761 if (prstate_housekeeping_conflict(new_prs, xcpus))
1762 return PERR_HKEEPING;
1763
1764 if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1765 !isolated_cpus_can_update(xcpus, NULL))
1766 return PERR_HKEEPING;
1767
1768 if (tasks_nocpu_error(parent, cs, xcpus))
1769 return PERR_NOCPUS;
1770
1771 /*
1772 * This function will only be called when all the preliminary
1773 * checks have passed. At this point, the following condition
1774 * should hold.
1775 *
1776 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1777 *
1778 * Warn if it is not the case.
1779 */
1780 cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1781 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1782
1783 deleting = true;
1784 } else if (cmd == partcmd_disable) {
1785 /*
1786 * May need to add cpus back to parent's effective_cpus
1787 * (and maybe remove them from subpartitions_cpus/isolated_cpus)
1788 * for a valid partition root. xcpus may contain CPUs that
1789 * shouldn't be removed from the two global cpumasks.
1790 */
1791 if (is_partition_valid(cs)) {
1792 cpumask_copy(tmp->addmask, cs->effective_xcpus);
1793 adding = true;
1794 }
1795 new_prs = PRS_MEMBER;
1796 } else if (newmask) {
1797 /*
1798 * Empty cpumask is not allowed
1799 */
1800 if (cpumask_empty(newmask)) {
1801 part_error = PERR_CPUSEMPTY;
1802 goto write_error;
1803 }
1804
1805 /* Check newmask again to see whether CPUs are available for parent/cs */
1806 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1807
1808 /*
1809 * partcmd_update with newmask:
1810 *
1811 * Compute add/delete mask to/from effective_cpus
1812 *
1813 * For valid partition:
1814 * addmask = exclusive_cpus & ~newmask
1815 * & parent->effective_xcpus
1816 * delmask = newmask & ~exclusive_cpus
1817 * & parent->effective_xcpus
1818 *
1819 * For invalid partition:
1820 * delmask = newmask & parent->effective_xcpus
1821 * The partition may become valid soon.
1822 */
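/*
 * A worked example with hypothetical masks for the valid-partition
 * case: with exclusive_cpus = 0-3, newmask = 2-5 and
 * parent->effective_xcpus = 0-7, addmask = 0-1 (CPUs given back to
 * the parent) and delmask = 4-5 (CPUs taken away from the parent).
 */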
1823 if (is_partition_invalid(cs)) {
1824 adding = false;
1825 deleting = cpumask_and(tmp->delmask,
1826 newmask, parent->effective_xcpus);
1827 } else {
1828 cpumask_andnot(tmp->addmask, xcpus, newmask);
1829 adding = cpumask_and(tmp->addmask, tmp->addmask,
1830 parent->effective_xcpus);
1831
1832 cpumask_andnot(tmp->delmask, newmask, xcpus);
1833 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1834 parent->effective_xcpus);
1835 }
1836
1837 /*
1838 * TBD: Invalidating a currently valid child root partition may
1839 * still break the isolated_cpus_can_update() rule if the parent is
1840 * an isolated partition.
1841 */
1842 if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1843 if ((parent_prs == PRS_ROOT) &&
1844 /* Adding to parent means removing isolated CPUs */
1845 !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1846 part_error = PERR_HKEEPING;
1847 if ((parent_prs == PRS_ISOLATED) &&
1848 /* Adding to parent means adding isolated CPUs */
1849 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1850 part_error = PERR_HKEEPING;
1851 }
1852
1853 /*
1854 * The new CPUs to be removed from parent's effective CPUs
1855 * must be present.
1856 */
1857 if (deleting) {
1858 cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1859 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1860 }
1861
1862 /*
1863 * Make partition invalid if parent's effective_cpus could
1864 * become empty and there are tasks in the parent.
1865 */
1866 if (nocpu && (!adding ||
1867 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1868 part_error = PERR_NOCPUS;
1869 deleting = false;
1870 adding = cpumask_and(tmp->addmask,
1871 xcpus, parent->effective_xcpus);
1872 }
1873 } else {
1874 /*
1875 * partcmd_update w/o newmask
1876 *
1877 * delmask = effective_xcpus & parent->effective_cpus
1878 *
1879 * This can be called from:
1880 * 1) update_cpumasks_hier()
1881 * 2) cpuset_hotplug_update_tasks()
1882 *
1883 * Check to see if it can be transitioned from valid to
1884 * invalid partition or vice versa.
1885 *
1886 * A partition error happens when parent has tasks and all
1887 * its effective CPUs will have to be distributed out.
1888 */
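/*
 * For example (hypothetical masks), an invalid partition with
 * effective_xcpus = 4-5 whose parent still has effective_cpus = 0-5
 * would compute delmask = 4-5 below, pulling those CPUs out of the
 * parent when the partition becomes valid again.
 */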
1889 if (nocpu) {
1890 part_error = PERR_NOCPUS;
1891 if (is_partition_valid(cs))
1892 adding = cpumask_and(tmp->addmask,
1893 xcpus, parent->effective_xcpus);
1894 } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
1895 cpumask_subset(xcpus, parent->effective_xcpus)) {
1896 struct cgroup_subsys_state *css;
1897 struct cpuset *child;
1898 bool exclusive = true;
1899
1900 /*
1901 * Converting an invalid partition to a valid one has to
1902 * pass the cpu exclusivity test.
1903 */
1904 rcu_read_lock();
1905 cpuset_for_each_child(child, css, parent) {
1906 if (child == cs)
1907 continue;
1908 if (!cpusets_are_exclusive(cs, child)) {
1909 exclusive = false;
1910 break;
1911 }
1912 }
1913 rcu_read_unlock();
1914 if (exclusive)
1915 deleting = cpumask_and(tmp->delmask,
1916 xcpus, parent->effective_cpus);
1917 else
1918 part_error = PERR_NOTEXCL;
1919 }
1920 }
1921
1922 write_error:
1923 if (part_error)
1924 WRITE_ONCE(cs->prs_err, part_error);
1925
1926 if (cmd == partcmd_update) {
1927 /*
1928 * Check for possible transition between valid and invalid
1929 * partition root.
1930 */
1931 switch (cs->partition_root_state) {
1932 case PRS_ROOT:
1933 case PRS_ISOLATED:
1934 if (part_error)
1935 new_prs = -old_prs;
1936 break;
1937 case PRS_INVALID_ROOT:
1938 case PRS_INVALID_ISOLATED:
1939 if (!part_error)
1940 new_prs = -old_prs;
1941 break;
1942 }
1943 }
1944
1945 if (!adding && !deleting && (new_prs == old_prs))
1946 return 0;
1947
1948 /*
1949 * Transitioning from invalid to valid or vice versa may require
1950 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1951 * validate_change() has already been successfully called and
1952 * CPU lists in cs haven't been updated yet. So defer it to later.
1953 */
1954 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1955 int err = update_partition_exclusive_flag(cs, new_prs);
1956
1957 if (err)
1958 return err;
1959 }
1960
1961 /*
1962 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1963 * only).
1964 *
1965 * Newly added CPUs will be removed from effective_cpus and
1966 * newly deleted ones will be added back to effective_cpus.
1967 */
1968 spin_lock_irq(&callback_lock);
1969 if (old_prs != new_prs)
1970 cs->partition_root_state = new_prs;
1971
1972 /*
1973 * Adding to parent's effective_cpus means deleting CPUs from cs
1974 * and vice versa.
1975 */
1976 if (adding)
1977 partition_xcpus_del(old_prs, parent, tmp->addmask);
1978 if (deleting)
1979 partition_xcpus_add(new_prs, parent, tmp->delmask);
1980
1981 spin_unlock_irq(&callback_lock);
1982
1983 if ((old_prs != new_prs) && (cmd == partcmd_update))
1984 update_partition_exclusive_flag(cs, new_prs);
1985
1986 if (adding || deleting) {
1987 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1988 update_sibling_cpumasks(parent, cs, tmp);
1989 }
1990
1991 /*
1992 * For partcmd_update without newmask, it is being called from
1993 * cpuset_handle_hotplug(). Update the load balance flag and
1994 * scheduling domain accordingly.
1995 */
1996 if ((cmd == partcmd_update) && !newmask)
1997 update_partition_sd_lb(cs, old_prs);
1998
1999 notify_partition_change(cs, old_prs);
2000 return 0;
2001 }
2002
2003 /**
2004 * compute_partition_effective_cpumask - compute effective_cpus for partition
2005 * @cs: partition root cpuset
2006 * @new_ecpus: previously computed effective_cpus to be updated
2007 *
2008 * Compute the effective_cpus of a partition root by scanning effective_xcpus
2009 * of child partition roots and excluding their effective_xcpus.
2010 *
2011 * This has the side effect of invalidating valid child partition roots,
2012 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2013 * or update_cpumasks_hier() where parent and children are modified
2014 * successively, we don't need to call update_parent_effective_cpumask()
2015 * and the child's effective_cpus will be updated in later iterations.
2016 *
2017 * Note that rcu_read_lock() is assumed to be held.
2018 */
2019 static void compute_partition_effective_cpumask(struct cpuset *cs,
2020 struct cpumask *new_ecpus)
2021 {
2022 struct cgroup_subsys_state *css;
2023 struct cpuset *child;
2024 bool populated = partition_is_populated(cs, NULL);
2025
2026 /*
2027 * Check child partition roots to see if they should be
2028 * invalidated when
2029 * 1) child effective_xcpus is not a subset of the new
2030 * exclusive_cpus
2031 * 2) all the effective_cpus will be used up and cs
2032 * has tasks
2033 */
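/*
 * For example (hypothetical masks), with cs->effective_xcpus = 0-7,
 * all CPUs active, and two valid child partitions owning 2-3 and 6-7,
 * the loop below leaves new_ecpus = 0-1,4-5 for cs itself.
 */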
2034 compute_excpus(cs, new_ecpus);
2035 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2036
2037 rcu_read_lock();
2038 cpuset_for_each_child(child, css, cs) {
2039 if (!is_partition_valid(child))
2040 continue;
2041
2042 /*
2043 * There shouldn't be a remote partition underneath another
2044 * partition root.
2045 */
2046 WARN_ON_ONCE(is_remote_partition(child));
2047 child->prs_err = 0;
2048 if (!cpumask_subset(child->effective_xcpus,
2049 cs->effective_xcpus))
2050 child->prs_err = PERR_INVCPUS;
2051 else if (populated &&
2052 cpumask_subset(new_ecpus, child->effective_xcpus))
2053 child->prs_err = PERR_NOCPUS;
2054
2055 if (child->prs_err) {
2056 int old_prs = child->partition_root_state;
2057
2058 /*
2059 * Invalidate child partition
2060 */
2061 spin_lock_irq(&callback_lock);
2062 make_partition_invalid(child);
2063 spin_unlock_irq(&callback_lock);
2064 notify_partition_change(child, old_prs);
2065 continue;
2066 }
2067 cpumask_andnot(new_ecpus, new_ecpus,
2068 child->effective_xcpus);
2069 }
2070 rcu_read_unlock();
2071 }
2072
2073 /*
2074 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2075 * @cs: the cpuset to consider
2076 * @tmp: temp variables for calculating effective_cpus & partition setup
2077 * @force: don't skip any descendant cpusets if set
2078 *
2079 * When configured cpumask is changed, the effective cpumasks of this cpuset
2080 * and all its descendants need to be updated.
2081 *
2082 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2083 *
2084 * Called with cpuset_mutex held
2085 */
2086 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2087 bool force)
2088 {
2089 struct cpuset *cp;
2090 struct cgroup_subsys_state *pos_css;
2091 int old_prs, new_prs;
2092
2093 rcu_read_lock();
2094 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2095 struct cpuset *parent = parent_cs(cp);
2096 bool remote = is_remote_partition(cp);
2097 bool update_parent = false;
2098
2099 old_prs = new_prs = cp->partition_root_state;
2100
2101 /*
2102 * For child remote partition root (!= cs), we need to call
2103 * remote_cpus_update() if effective_xcpus will be changed.
2104 * Otherwise, we can skip the whole subtree.
2105 *
2106 * remote_cpus_update() will reuse tmp->new_cpus only after
2107 * its current value has been processed.
2108 */
2109 if (remote && (cp != cs)) {
2110 compute_excpus(cp, tmp->new_cpus);
2111 if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2112 pos_css = css_rightmost_descendant(pos_css);
2113 continue;
2114 }
2115 rcu_read_unlock();
2116 remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2117 rcu_read_lock();
2118
2119 /* Remote partition may be invalidated */
2120 new_prs = cp->partition_root_state;
2121 remote = (new_prs == old_prs);
2122 }
2123
2124 if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2125 compute_partition_effective_cpumask(cp, tmp->new_cpus);
2126 else
2127 compute_effective_cpumask(tmp->new_cpus, cp, parent);
2128
2129 if (remote)
2130 goto get_css; /* Ready to update cpuset data */
2131
2132 /*
2133 * A partition with no effective_cpus is allowed as long as
2134 * there is no task associated with it. Call
2135 * update_parent_effective_cpumask() to check it.
2136 */
2137 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2138 update_parent = true;
2139 goto update_parent_effective;
2140 }
2141
2142 /*
2143 * If it becomes empty, inherit the effective mask of the
2144 * parent, which is guaranteed to have some CPUs unless
2145 * it is a partition root that has explicitly distributed
2146 * out all its CPUs.
2147 */
2148 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2149 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2150
2151 /*
2152 * Skip the whole subtree if
2153 * 1) the cpumask remains the same,
2154 * 2) it has no partition root state,
2155 * 3) the force flag is not set, and
2156 * 4) for v2, its load balance state is the same as its parent's.
2157 */
2158 if (!cp->partition_root_state && !force &&
2159 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2160 (!cpuset_v2() ||
2161 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2162 pos_css = css_rightmost_descendant(pos_css);
2163 continue;
2164 }
2165
2166 update_parent_effective:
2167 /*
2168 * update_parent_effective_cpumask() should have been called
2169 * for cs already in update_cpumask(). We should also call
2170 * cpuset_update_tasks_cpumask() again for tasks in the parent
2171 * cpuset if the parent's effective_cpus changes.
2172 */
2173 if ((cp != cs) && old_prs) {
2174 switch (parent->partition_root_state) {
2175 case PRS_ROOT:
2176 case PRS_ISOLATED:
2177 update_parent = true;
2178 break;
2179
2180 default:
2181 /*
2182 * When parent is not a partition root or is
2183 * invalid, child partition roots become
2184 * invalid too.
2185 */
2186 if (is_partition_valid(cp))
2187 new_prs = -cp->partition_root_state;
2188 WRITE_ONCE(cp->prs_err,
2189 is_partition_invalid(parent)
2190 ? PERR_INVPARENT : PERR_NOTPART);
2191 break;
2192 }
2193 }
2194 get_css:
2195 if (!css_tryget_online(&cp->css))
2196 continue;
2197 rcu_read_unlock();
2198
2199 if (update_parent) {
2200 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2201 /*
2202 * The cpuset partition_root_state may become
2203 * invalid. Capture it.
2204 */
2205 new_prs = cp->partition_root_state;
2206 }
2207
2208 spin_lock_irq(&callback_lock);
2209 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2210 cp->partition_root_state = new_prs;
2211 /*
2212 * Need to compute effective_xcpus if either exclusive_cpus
2213 * is non-empty or it is a valid partition root.
2214 */
2215 if ((new_prs > 0) || !cpumask_empty(cp->exclusive_cpus))
2216 compute_excpus(cp, cp->effective_xcpus);
2217 if (new_prs <= 0)
2218 reset_partition_data(cp);
2219 spin_unlock_irq(&callback_lock);
2220
2221 notify_partition_change(cp, old_prs);
2222
2223 WARN_ON(!is_in_v2_mode() &&
2224 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2225
2226 cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
2227
2228 /*
2229 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2230 * from parent if current cpuset isn't a valid partition root
2231 * and their load balance states differ.
2232 */
2233 if (cpuset_v2() && !is_partition_valid(cp) &&
2234 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2235 if (is_sched_load_balance(parent))
2236 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2237 else
2238 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2239 }
2240
2241 /*
2242 * On legacy hierarchy, if the effective cpumask of any non-
2243 * empty cpuset is changed, we need to rebuild sched domains.
2244 * On default hierarchy, the cpuset needs to be a partition
2245 * root as well.
2246 */
2247 if (!cpumask_empty(cp->cpus_allowed) &&
2248 is_sched_load_balance(cp) &&
2249 (!cpuset_v2() || is_partition_valid(cp)))
2250 cpuset_force_rebuild();
2251
2252 rcu_read_lock();
2253 css_put(&cp->css);
2254 }
2255 rcu_read_unlock();
2256 }
2257
2258 /**
2259 * update_sibling_cpumasks - Update siblings cpumasks
2260 * @parent: Parent cpuset
2261 * @cs: Current cpuset
2262 * @tmp: Temp variables
2263 */
2264 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2265 struct tmpmasks *tmp)
2266 {
2267 struct cpuset *sibling;
2268 struct cgroup_subsys_state *pos_css;
2269
2270 lockdep_assert_cpuset_lock_held();
2271
2272 /*
2273 * Check all its siblings and call update_cpumasks_hier()
2274 * if their effective_cpus will need to be changed.
2275 *
2276 * It is possible that a change in the parent's effective_cpus,
2277 * due to a change in a child partition's effective_xcpus, will impact
2278 * its siblings even if they do not inherit the parent's effective_cpus
2279 * directly. It should not impact a valid partition.
2280 *
2281 * The update_cpumasks_hier() function may sleep. So we have to
2282 * release the RCU read lock before calling it.
2283 */
2284 rcu_read_lock();
2285 cpuset_for_each_child(sibling, pos_css, parent) {
2286 if (sibling == cs || is_partition_valid(sibling))
2287 continue;
2288
2289 compute_effective_cpumask(tmp->new_cpus, sibling,
2290 parent);
2291 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2292 continue;
2293
2294 if (!css_tryget_online(&sibling->css))
2295 continue;
2296
2297 rcu_read_unlock();
2298 update_cpumasks_hier(sibling, tmp, false);
2299 rcu_read_lock();
2300 css_put(&sibling->css);
2301 }
2302 rcu_read_unlock();
2303 }
2304
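/*
 * parse_cpuset_cpulist - parse a cpulist string into a cpumask
 *
 * For example (hypothetical input), writing "0-3,6" yields a mask of
 * CPUs 0-3 and 6. The list is rejected with -EINVAL if it is not a
 * subset of top_cpuset.cpus_allowed.
 */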
2305 static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2306 {
2307 int retval;
2308
2309 retval = cpulist_parse(buf, out_mask);
2310 if (retval < 0)
2311 return retval;
2312 if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2313 return -EINVAL;
2314
2315 return 0;
2316 }
2317
2318 /**
2319 * validate_partition - Validate a cpuset partition configuration
2320 * @cs: The cpuset to validate
2321 * @trialcs: The trial cpuset containing proposed configuration changes
2322 *
2323 * If any validation check fails, the appropriate error code is set in the
2324 * cpuset's prs_err field.
2325 *
2326 * Return: PRS error code (0 if valid, non-zero error code if invalid)
2327 */
2328 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2329 {
2330 struct cpuset *parent = parent_cs(cs);
2331
2332 if (cs_is_member(trialcs))
2333 return PERR_NONE;
2334
2335 if (cpumask_empty(trialcs->effective_xcpus))
2336 return PERR_INVCPUS;
2337
2338 if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2339 trialcs->effective_xcpus))
2340 return PERR_HKEEPING;
2341
2342 if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2343 return PERR_NOCPUS;
2344
2345 return PERR_NONE;
2346 }
2347
2348 /**
2349 * partition_cpus_change - Handle partition state changes due to CPU mask updates
2350 * @cs: The target cpuset being modified
2351 * @trialcs: The trial cpuset containing proposed configuration changes
2352 * @tmp: Temporary masks for intermediate calculations
2353 *
2354 * This function handles partition state transitions triggered by CPU mask changes.
2355 * CPU modifications may cause a partition to be disabled or require state updates.
2356 */
2357 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2358 struct tmpmasks *tmp)
2359 {
2360 enum prs_errcode prs_err;
2361
2362 if (cs_is_member(cs))
2363 return;
2364
2365 prs_err = validate_partition(cs, trialcs);
2366 if (prs_err)
2367 trialcs->prs_err = cs->prs_err = prs_err;
2368
2369 if (is_remote_partition(cs)) {
2370 if (trialcs->prs_err)
2371 remote_partition_disable(cs, tmp);
2372 else
2373 remote_cpus_update(cs, trialcs->exclusive_cpus,
2374 trialcs->effective_xcpus, tmp);
2375 } else {
2376 if (trialcs->prs_err)
2377 update_parent_effective_cpumask(cs, partcmd_invalidate,
2378 NULL, tmp);
2379 else
2380 update_parent_effective_cpumask(cs, partcmd_update,
2381 trialcs->effective_xcpus, tmp);
2382 }
2383 }
2384
2385 /**
2386 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2387 * @cs: the cpuset to consider
2388 * @trialcs: trial cpuset
2389 * @buf: buffer of cpu numbers written to this cpuset
2390 */
2391 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2392 const char *buf)
2393 {
2394 int retval;
2395 struct tmpmasks tmp;
2396 bool force = false;
2397 int old_prs = cs->partition_root_state;
2398
2399 retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2400 if (retval < 0)
2401 return retval;
2402
2403 /* Nothing to do if the cpus didn't change */
2404 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2405 return 0;
2406
2407 compute_trialcs_excpus(trialcs, cs);
2408 trialcs->prs_err = PERR_NONE;
2409
2410 retval = validate_change(cs, trialcs);
2411 if (retval < 0)
2412 return retval;
2413
2414 if (alloc_tmpmasks(&tmp))
2415 return -ENOMEM;
2416
2417 /*
2418 * Check all the descendants in update_cpumasks_hier() if
2419 * effective_xcpus is to be changed.
2420 */
2421 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2422
2423 partition_cpus_change(cs, trialcs, &tmp);
2424
2425 spin_lock_irq(&callback_lock);
2426 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2427 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2428 if ((old_prs > 0) && !is_partition_valid(cs))
2429 reset_partition_data(cs);
2430 spin_unlock_irq(&callback_lock);
2431
2432 /* effective_cpus/effective_xcpus will be updated here */
2433 update_cpumasks_hier(cs, &tmp, force);
2434
2435 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2436 if (cs->partition_root_state)
2437 update_partition_sd_lb(cs, old_prs);
2438
2439 free_tmpmasks(&tmp);
2440 return retval;
2441 }
2442
2443 /**
2444 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2445 * @cs: the cpuset to consider
2446 * @trialcs: trial cpuset
2447 * @buf: buffer of cpu numbers written to this cpuset
2448 *
2449 * The tasks' cpumask will be updated if cs is a valid partition root.
2450 */
2451 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2452 const char *buf)
2453 {
2454 int retval;
2455 struct tmpmasks tmp;
2456 bool force = false;
2457 int old_prs = cs->partition_root_state;
2458
2459 retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2460 if (retval < 0)
2461 return retval;
2462
2463 /* Nothing to do if the CPUs didn't change */
2464 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2465 return 0;
2466
2467 /*
2468 * Reject the change if the exclusive CPUs conflict with those of
2469 * the siblings.
2470 */
2471 if (compute_trialcs_excpus(trialcs, cs))
2472 return -EINVAL;
2473
2474 /*
2475 * Check all the descendants in update_cpumasks_hier() if
2476 * effective_xcpus is to be changed.
2477 */
2478 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2479
2480 retval = validate_change(cs, trialcs);
2481 if (retval)
2482 return retval;
2483
2484 if (alloc_tmpmasks(&tmp))
2485 return -ENOMEM;
2486
2487 trialcs->prs_err = PERR_NONE;
2488 partition_cpus_change(cs, trialcs, &tmp);
2489
2490 spin_lock_irq(&callback_lock);
2491 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2492 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2493 if ((old_prs > 0) && !is_partition_valid(cs))
2494 reset_partition_data(cs);
2495 spin_unlock_irq(&callback_lock);
2496
2497 /*
2498 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2499 * of the subtree when it is a valid partition root or effective_xcpus
2500 * is updated.
2501 */
2502 if (is_partition_valid(cs) || force)
2503 update_cpumasks_hier(cs, &tmp, force);
2504
2505 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2506 if (cs->partition_root_state)
2507 update_partition_sd_lb(cs, old_prs);
2508
2509 free_tmpmasks(&tmp);
2510 return 0;
2511 }
2512
2513 /*
2514 * Migrate memory region from one set of nodes to another. This is
2515 * performed asynchronously as it can be called from process migration path
2516 * holding locks involved in process management. All mm migrations are
2517 * performed in the queued order and can be waited for by flushing
2518 * cpuset_migrate_mm_wq.
2519 */
2520
2521 struct cpuset_migrate_mm_work {
2522 struct work_struct work;
2523 struct mm_struct *mm;
2524 nodemask_t from;
2525 nodemask_t to;
2526 };
2527
2528 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2529 {
2530 struct cpuset_migrate_mm_work *mwork =
2531 container_of(work, struct cpuset_migrate_mm_work, work);
2532
2533 /* on a wq worker, no need to worry about %current's mems_allowed */
2534 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2535 mmput(mwork->mm);
2536 kfree(mwork);
2537 }
2538
2539 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2540 const nodemask_t *to)
2541 {
2542 struct cpuset_migrate_mm_work *mwork;
2543
2544 if (nodes_equal(*from, *to)) {
2545 mmput(mm);
2546 return;
2547 }
2548
2549 mwork = kzalloc_obj(*mwork);
2550 if (mwork) {
2551 mwork->mm = mm;
2552 mwork->from = *from;
2553 mwork->to = *to;
2554 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2555 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2556 } else {
2557 mmput(mm);
2558 }
2559 }
2560
2561 static void flush_migrate_mm_task_workfn(struct callback_head *head)
2562 {
2563 flush_workqueue(cpuset_migrate_mm_wq);
2564 kfree(head);
2565 }
2566
2567 static void schedule_flush_migrate_mm(void)
2568 {
2569 struct callback_head *flush_cb;
2570
2571 flush_cb = kzalloc_obj(struct callback_head);
2572 if (!flush_cb)
2573 return;
2574
2575 init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2576
2577 if (task_work_add(current, flush_cb, TWA_RESUME))
2578 kfree(flush_cb);
2579 }
2580
2581 /*
2582 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2583 * @tsk: the task to change
2584 * @newmems: the new nodes to set for the task
2585 *
2586 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2587 * and rebind the task's mempolicy, if any. If the task is allocating in
2588 * parallel, it might temporarily see an empty intersection, which results in
2589 * a seqlock check and retry before OOM or allocation failure.
2590 */
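/*
 * A sketch of the reader side this pairs with, assuming the
 * read_mems_allowed_begin()/read_mems_allowed_retry() helpers from
 * include/linux/cpuset.h (used, e.g., by page allocation paths):
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		... allocate using current->mems_allowed ...
 *	} while (read_mems_allowed_retry(seq));
 */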
2591 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2592 nodemask_t *newmems)
2593 {
2594 task_lock(tsk);
2595
2596 local_irq_disable();
2597 write_seqcount_begin(&tsk->mems_allowed_seq);
2598
2599 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2600 mpol_rebind_task(tsk, newmems);
2601 tsk->mems_allowed = *newmems;
2602
2603 write_seqcount_end(&tsk->mems_allowed_seq);
2604 local_irq_enable();
2605
2606 task_unlock(tsk);
2607 }
2608
2609 static void *cpuset_being_rebound;
2610
2611 /**
2612 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2613 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2614 *
2615 * Iterate through each task of @cs updating its mems_allowed to the
2616 * effective cpuset's. As this function is called with cpuset_mutex held,
2617 * cpuset membership stays stable.
2618 */
2619 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2620 {
2621 static nodemask_t newmems; /* protected by cpuset_mutex */
2622 struct css_task_iter it;
2623 struct task_struct *task;
2624
2625 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2626
2627 guarantee_online_mems(cs, &newmems);
2628
2629 /*
2630 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2631 * take while holding tasklist_lock. Forks can happen - the
2632 * mpol_dup() cpuset_being_rebound check will catch such forks,
2633 * and rebind their vma mempolicies too. Because we still hold
2634 * the global cpuset_mutex, we know that no other rebind effort
2635 * will be contending for the global variable cpuset_being_rebound.
2636 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2637 * is idempotent. Also migrate pages in each mm to new nodes.
2638 */
2639 css_task_iter_start(&cs->css, 0, &it);
2640 while ((task = css_task_iter_next(&it))) {
2641 struct mm_struct *mm;
2642 bool migrate;
2643
2644 cpuset_change_task_nodemask(task, &newmems);
2645
2646 mm = get_task_mm(task);
2647 if (!mm)
2648 continue;
2649
2650 migrate = is_memory_migrate(cs);
2651
2652 mpol_rebind_mm(mm, &cs->mems_allowed);
2653 if (migrate)
2654 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2655 else
2656 mmput(mm);
2657 }
2658 css_task_iter_end(&it);
2659
2660 /*
2661 * All the tasks' nodemasks have been updated, update
2662 * cs->old_mems_allowed.
2663 */
2664 cs->old_mems_allowed = newmems;
2665
2666 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2667 cpuset_being_rebound = NULL;
2668 }
2669
2670 /*
2671 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2672 * @cs: the cpuset to consider
2673 * @new_mems: a temp variable for calculating new effective_mems
2674 *
2675 * When configured nodemask is changed, the effective nodemasks of this cpuset
2676 * and all its descendants need to be updated.
2677 *
2678 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2679 *
2680 * Called with cpuset_mutex held
2681 */
2682 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2683 {
2684 struct cpuset *cp;
2685 struct cgroup_subsys_state *pos_css;
2686
2687 rcu_read_lock();
2688 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2689 struct cpuset *parent = parent_cs(cp);
2690
2691 bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2692
2693 /*
2694 * If it becomes empty, inherit the effective mask of the
2695 * parent, which is guaranteed to have some MEMs.
2696 */
2697 if (is_in_v2_mode() && !has_mems)
2698 *new_mems = parent->effective_mems;
2699
2700 /* Skip the whole subtree if the nodemask remains the same. */
2701 if (nodes_equal(*new_mems, cp->effective_mems)) {
2702 pos_css = css_rightmost_descendant(pos_css);
2703 continue;
2704 }
2705
2706 if (!css_tryget_online(&cp->css))
2707 continue;
2708 rcu_read_unlock();
2709
2710 spin_lock_irq(&callback_lock);
2711 cp->effective_mems = *new_mems;
2712 spin_unlock_irq(&callback_lock);
2713
2714 WARN_ON(!is_in_v2_mode() &&
2715 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2716
2717 cpuset_update_tasks_nodemask(cp);
2718
2719 rcu_read_lock();
2720 css_put(&cp->css);
2721 }
2722 rcu_read_unlock();
2723 }
2724
2725 /*
2726 * Handle user request to change the 'mems' memory placement
2727 * of a cpuset. Needs to validate the request, update the
2728 * cpuset's mems_allowed, and for each task in the cpuset,
2729 * update mems_allowed and rebind task's mempolicy and any vma
2730 * mempolicies and if the cpuset is marked 'memory_migrate',
2731 * migrate the task's pages to the new memory.
2732 *
2733 * Call with cpuset_mutex held. May take callback_lock during call.
2734 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2735 * lock each such task's mm->mmap_lock, scan its vmas and rebind
2736 * their mempolicies to the cpuset's new mems_allowed.
2737 */
2738 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2739 const char *buf)
2740 {
2741 int retval;
2742
2743 /*
2744 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2745 * The validate_change() call ensures that cpusets with tasks have memory.
2746 */
2747 retval = nodelist_parse(buf, trialcs->mems_allowed);
2748 if (retval < 0)
2749 return retval;
2750
2751 if (!nodes_subset(trialcs->mems_allowed,
2752 top_cpuset.mems_allowed))
2753 return -EINVAL;
2754
2755 /* No change? nothing to do */
2756 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2757 return 0;
2758
2759 retval = validate_change(cs, trialcs);
2760 if (retval < 0)
2761 return retval;
2762
2763 check_insane_mems_config(&trialcs->mems_allowed);
2764
2765 spin_lock_irq(&callback_lock);
2766 cs->mems_allowed = trialcs->mems_allowed;
2767 spin_unlock_irq(&callback_lock);
2768
2769 /* use trialcs->mems_allowed as a temp variable */
2770 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2771 return 0;
2772 }
2773
2774 bool current_cpuset_is_being_rebound(void)
2775 {
2776 bool ret;
2777
2778 rcu_read_lock();
2779 ret = task_cs(current) == cpuset_being_rebound;
2780 rcu_read_unlock();
2781
2782 return ret;
2783 }
2784
2785 /*
2786 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2787 * bit: the bit to update (see cpuset_flagbits_t)
2788 * cs: the cpuset to update
2789 * turning_on: whether the flag is being set or cleared
2790 *
2791 * Call with cpuset_mutex held.
2792 */
2793
2794 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2795 int turning_on)
2796 {
2797 struct cpuset *trialcs;
2798 int balance_flag_changed;
2799 int spread_flag_changed;
2800 int err;
2801
2802 trialcs = dup_or_alloc_cpuset(cs);
2803 if (!trialcs)
2804 return -ENOMEM;
2805
2806 if (turning_on)
2807 set_bit(bit, &trialcs->flags);
2808 else
2809 clear_bit(bit, &trialcs->flags);
2810
2811 err = validate_change(cs, trialcs);
2812 if (err < 0)
2813 goto out;
2814
2815 balance_flag_changed = (is_sched_load_balance(cs) !=
2816 is_sched_load_balance(trialcs));
2817
2818 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2819 || (is_spread_page(cs) != is_spread_page(trialcs)));
2820
2821 spin_lock_irq(&callback_lock);
2822 cs->flags = trialcs->flags;
2823 spin_unlock_irq(&callback_lock);
2824
2825 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2826 if (cpuset_v2())
2827 cpuset_force_rebuild();
2828 else
2829 rebuild_sched_domains_locked();
2830 }
2831
2832 if (spread_flag_changed)
2833 cpuset1_update_tasks_flags(cs);
2834 out:
2835 free_cpuset(trialcs);
2836 return err;
2837 }
2838
2839 /**
2840 * update_prstate - update partition_root_state
2841 * @cs: the cpuset to update
2842 * @new_prs: new partition root state
2843 * Return: 0 if successful, != 0 if error
2844 *
2845 * Call with cpuset_mutex held.
2846 */
2847 static int update_prstate(struct cpuset *cs, int new_prs)
2848 {
2849 int err = PERR_NONE, old_prs = cs->partition_root_state;
2850 struct cpuset *parent = parent_cs(cs);
2851 struct tmpmasks tmpmask;
2852 bool isolcpus_updated = false;
2853
2854 if (old_prs == new_prs)
2855 return 0;
2856
2857 /*
2858 * Treat a previously invalid partition root as if it is a "member".
2859 */
2860 if (new_prs && is_partition_invalid(cs))
2861 old_prs = PRS_MEMBER;
2862
2863 if (alloc_tmpmasks(&tmpmask))
2864 return -ENOMEM;
2865
2866 err = update_partition_exclusive_flag(cs, new_prs);
2867 if (err)
2868 goto out;
2869
2870 if (!old_prs) {
2871 /*
2872 * cpus_allowed and exclusive_cpus cannot both be empty.
2873 */
2874 if (xcpus_empty(cs)) {
2875 err = PERR_CPUSEMPTY;
2876 goto out;
2877 }
2878
2879 /*
2880 * We don't support the creation of a new local partition with
2881 * a remote partition underneath it. This unsupported
2882 * setting can happen only if parent is the top_cpuset because
2883 * a remote partition cannot be created underneath an existing
2884 * local or remote partition.
2885 */
2886 if ((parent == &top_cpuset) &&
2887 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2888 err = PERR_REMOTE;
2889 goto out;
2890 }
2891
2892 /*
2893 * If the parent is a valid partition, enable a local partition.
2894 * Otherwise, enable a remote partition.
2895 */
2896 if (is_partition_valid(parent)) {
2897 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2898 ? partcmd_enable : partcmd_enablei;
2899
2900 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2901 } else {
2902 err = remote_partition_enable(cs, new_prs, &tmpmask);
2903 }
2904 } else if (old_prs && new_prs) {
2905 /*
2906 * A change in load balance state only, no change in cpumasks.
2907 * Need to update isolated_cpus.
2908 */
2909 if (((new_prs == PRS_ISOLATED) &&
2910 !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
2911 prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
2912 err = PERR_HKEEPING;
2913 else
2914 isolcpus_updated = true;
2915 } else {
2916 /*
2917 * Switching back to member is always allowed even if it
2918 * disables child partitions.
2919 */
2920 if (is_remote_partition(cs))
2921 remote_partition_disable(cs, &tmpmask);
2922 else
2923 update_parent_effective_cpumask(cs, partcmd_disable,
2924 NULL, &tmpmask);
2925
2926 /*
2927 * Invalidation of child partitions will be done in
2928 * update_cpumasks_hier().
2929 */
2930 }
2931 out:
2932 /*
2933 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2934 * happens.
2935 */
2936 if (err) {
2937 new_prs = -new_prs;
2938 update_partition_exclusive_flag(cs, new_prs);
2939 }
2940
2941 spin_lock_irq(&callback_lock);
2942 cs->partition_root_state = new_prs;
2943 WRITE_ONCE(cs->prs_err, err);
2944 if (!is_partition_valid(cs))
2945 reset_partition_data(cs);
2946 else if (isolcpus_updated)
2947 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2948 spin_unlock_irq(&callback_lock);
2949
2950 /* Force update if switching back to member & update effective_xcpus */
2951 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2952
2953 /* A newly created partition must have effective_xcpus set */
2954 WARN_ON_ONCE(!old_prs && (new_prs > 0)
2955 && cpumask_empty(cs->effective_xcpus));
2956
2957 /* Update sched domains and load balance flag */
2958 update_partition_sd_lb(cs, old_prs);
2959
2960 notify_partition_change(cs, old_prs);
2961 if (force_sd_rebuild)
2962 rebuild_sched_domains_locked();
2963 free_tmpmasks(&tmpmask);
2964 return 0;
2965 }
2966
2967 static struct cpuset *cpuset_attach_old_cs;
2968
2969 /*
2970 * Check to see if a cpuset can accept a new task
2971 * For v1, cpus_allowed and mems_allowed can't be empty.
2972 * For v2, effective_cpus can't be empty.
2973 * Note that in v1, effective_cpus = cpus_allowed.
2974 */
2975 static int cpuset_can_attach_check(struct cpuset *cs)
2976 {
2977 if (cpumask_empty(cs->effective_cpus) ||
2978 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2979 return -ENOSPC;
2980 return 0;
2981 }
2982
2983 static void reset_migrate_dl_data(struct cpuset *cs)
2984 {
2985 cs->nr_migrate_dl_tasks = 0;
2986 cs->sum_migrate_dl_bw = 0;
2987 cs->dl_bw_cpu = -1;
2988 }
2989
2990 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2991 static int cpuset_can_attach(struct cgroup_taskset *tset)
2992 {
2993 struct cgroup_subsys_state *css;
2994 struct cpuset *cs, *oldcs;
2995 struct task_struct *task;
2996 bool setsched_check;
2997 int cpu, ret;
2998
2999 /* used later by cpuset_attach() */
3000 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
3001 oldcs = cpuset_attach_old_cs;
3002 cs = css_cs(css);
3003
3004 mutex_lock(&cpuset_mutex);
3005
3006 /* Check to see if task is allowed in the cpuset */
3007 ret = cpuset_can_attach_check(cs);
3008 if (ret)
3009 goto out_unlock;
3010
3011 /*
3012 * Skip the setscheduler permission check in v2 when nothing changes;
3013 * migration permission derives from hierarchy ownership in
3014 * cgroup_procs_write_permission().
3015 */
3016 setsched_check = !cpuset_v2() ||
3017 !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus) ||
3018 !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3019
3020 /*
3021 * A v1 cpuset with tasks will have no CPU left only when CPU hotplug
3022 * brings the last online CPU offline as users are not allowed to empty
3023 * cpuset.cpus when there are active tasks inside. When that happens,
3024 * we should allow tasks to migrate out without security check to make
3025 * sure they will be able to run after migration.
3026 */
3027 if (!is_in_v2_mode() && cpumask_empty(oldcs->effective_cpus))
3028 setsched_check = false;
3029
3030 cgroup_taskset_for_each(task, css, tset) {
3031 ret = task_can_attach(task);
3032 if (ret)
3033 goto out_unlock;
3034
3035 if (setsched_check) {
3036 ret = security_task_setscheduler(task);
3037 if (ret)
3038 goto out_unlock;
3039 }
3040
3041 if (dl_task(task)) {
3042 /*
3043 * Count all migrating DL tasks for cpuset task accounting.
3044 * Only tasks that need a root-domain bandwidth move
3045 * contribute to sum_migrate_dl_bw.
3046 */
3047 cs->nr_migrate_dl_tasks++;
3048 if (dl_task_needs_bw_move(task, cs->effective_cpus))
3049 cs->sum_migrate_dl_bw += task->dl.dl_bw;
3050 }
3051 }
3052
3053 if (!cs->sum_migrate_dl_bw)
3054 goto out_success;
3055
3056 cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3057 if (unlikely(cpu >= nr_cpu_ids)) {
3058 ret = -EINVAL;
3059 goto out_unlock;
3060 }
3061
3062 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3063 if (ret)
3064 goto out_unlock;
3065
3066 cs->dl_bw_cpu = cpu;
3067
3068 out_success:
3069 /*
3070 * Mark that an attach is in progress. This makes validate_change() fail
3071 * changes which zero cpus/mems_allowed.
3072 */
3073 cs->attach_in_progress++;
3074
3075 out_unlock:
3076 if (ret)
3077 reset_migrate_dl_data(cs);
3078 mutex_unlock(&cpuset_mutex);
3079 return ret;
3080 }
3081
3082 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3083 {
3084 struct cgroup_subsys_state *css;
3085 struct cpuset *cs;
3086
3087 cgroup_taskset_first(tset, &css);
3088 cs = css_cs(css);
3089
3090 mutex_lock(&cpuset_mutex);
3091 dec_attach_in_progress_locked(cs);
3092
3093 if (cs->dl_bw_cpu >= 0)
3094 dl_bw_free(cs->dl_bw_cpu, cs->sum_migrate_dl_bw);
3095
3096 if (cs->nr_migrate_dl_tasks)
3097 reset_migrate_dl_data(cs);
3098
3099 mutex_unlock(&cpuset_mutex);
3100 }
3101
3102 /*
3103 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3104 * but we can't allocate it dynamically there. Define it as a global and
3105 * allocate it in cpuset_init().
3106 */
3107 static cpumask_var_t cpus_attach;
3108 static nodemask_t cpuset_attach_nodemask_to;
3109
3110 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3111 {
3112 lockdep_assert_cpuset_lock_held();
3113
3114 if (cs != &top_cpuset)
3115 guarantee_active_cpus(task, cpus_attach);
3116 else
3117 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3118 subpartitions_cpus);
3119 /*
3120 * can_attach beforehand should guarantee that this doesn't
3121 * fail. TODO: have a better way to handle failure here
3122 */
3123 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3124
3125 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3126 cpuset1_update_task_spread_flags(cs, task);
3127 }
3128
3129 static void cpuset_attach(struct cgroup_taskset *tset)
3130 {
3131 struct task_struct *task;
3132 struct task_struct *leader;
3133 struct cgroup_subsys_state *css;
3134 struct cpuset *cs;
3135 struct cpuset *oldcs = cpuset_attach_old_cs;
3136 bool cpus_updated, mems_updated;
3137 bool queue_task_work = false;
3138
3139 cgroup_taskset_first(tset, &css);
3140 cs = css_cs(css);
3141
3142 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3143 mutex_lock(&cpuset_mutex);
3144 cpus_updated = !cpumask_equal(cs->effective_cpus,
3145 oldcs->effective_cpus);
3146 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3147
3148 /*
3149 * In the default hierarchy, enabling cpuset in the child cgroups
3150 * will trigger a number of cpuset_attach() calls with no change
3151 * in effective cpus and mems. In that case, we can optimize out
3152 * by skipping the task iteration and update.
3153 */
3154 if (cpuset_v2() && !cpus_updated && !mems_updated) {
3155 cpuset_attach_nodemask_to = cs->effective_mems;
3156 goto out;
3157 }
3158
3159 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3160
3161 cgroup_taskset_for_each(task, css, tset)
3162 cpuset_attach_task(cs, task);
3163
3164 /*
3165 * Change mm for all threadgroup leaders. This is expensive and may
3166 * sleep and should be moved outside migration path proper. Skip it
3167 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3168 * not set.
3169 */
3170 cpuset_attach_nodemask_to = cs->effective_mems;
3171 if (!is_memory_migrate(cs) && !mems_updated)
3172 goto out;
3173
3174 cgroup_taskset_for_each_leader(leader, css, tset) {
3175 struct mm_struct *mm = get_task_mm(leader);
3176
3177 if (mm) {
3178 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3179
3180 /*
3181 * old_mems_allowed is the same with mems_allowed
3182 * here, except if this task is being moved
3183 * automatically due to hotplug. In that case
3184 * @mems_allowed has been updated and is empty, so
3185 * @old_mems_allowed is the right nodemask to
3186 * migrate the mm from.
3187 */
3188 if (is_memory_migrate(cs)) {
3189 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3190 &cpuset_attach_nodemask_to);
3191 queue_task_work = true;
3192 } else
3193 mmput(mm);
3194 }
3195 }
3196
3197 out:
3198 if (queue_task_work)
3199 schedule_flush_migrate_mm();
3200 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3201
3202 if (cs->nr_migrate_dl_tasks) {
3203 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3204 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3205 reset_migrate_dl_data(cs);
3206 }
3207
3208 dec_attach_in_progress_locked(cs);
3209
3210 mutex_unlock(&cpuset_mutex);
3211 }
3212
3213 /*
3214 * Common handling for a write to a "cpus" or "mems" file.
3215 */
3216 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3217 char *buf, size_t nbytes, loff_t off)
3218 {
3219 struct cpuset *cs = css_cs(of_css(of));
3220 struct cpuset *trialcs;
3221 int retval = -ENODEV;
3222
3223 /* root is read-only */
3224 if (cs == &top_cpuset)
3225 return -EACCES;
3226
3227 buf = strstrip(buf);
3228 cpuset_full_lock();
3229 if (!is_cpuset_online(cs))
3230 goto out_unlock;
3231
3232 trialcs = dup_or_alloc_cpuset(cs);
3233 if (!trialcs) {
3234 retval = -ENOMEM;
3235 goto out_unlock;
3236 }
3237
3238 switch (of_cft(of)->private) {
3239 case FILE_CPULIST:
3240 retval = update_cpumask(cs, trialcs, buf);
3241 break;
3242 case FILE_EXCLUSIVE_CPULIST:
3243 retval = update_exclusive_cpumask(cs, trialcs, buf);
3244 break;
3245 case FILE_MEMLIST:
3246 retval = update_nodemask(cs, trialcs, buf);
3247 break;
3248 default:
3249 retval = -EINVAL;
3250 break;
3251 }
3252
3253 free_cpuset(trialcs);
3254 out_unlock:
3255 cpuset_update_sd_hk_unlock();
3256 if (of_cft(of)->private == FILE_MEMLIST)
3257 schedule_flush_migrate_mm();
3258 return retval ?: nbytes;
3259 }
3260
3261 /*
3262 * These ascii lists should be read in a single call, by using a user
3263 * buffer large enough to hold the entire map. If read in smaller
3264 * chunks, there is no guarantee of atomicity. Since the display format
3265 * used, list of ranges of sequential numbers, is variable length,
3266 * and since these maps can change value dynamically, one could read
3267 * gibberish by doing partial reads while a list was changing.
3268 */
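/*
 * For example (hypothetical user-space sketch), a reader would issue a
 * single read(2) with a buffer sized for the whole list:
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * rather than looping over smaller reads.
 */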
3269 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3270 {
3271 struct cpuset *cs = css_cs(seq_css(sf));
3272 cpuset_filetype_t type = seq_cft(sf)->private;
3273 int ret = 0;
3274
3275 spin_lock_irq(&callback_lock);
3276
3277 switch (type) {
3278 case FILE_CPULIST:
3279 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3280 break;
3281 case FILE_MEMLIST:
3282 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3283 break;
3284 case FILE_EFFECTIVE_CPULIST:
3285 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3286 break;
3287 case FILE_EFFECTIVE_MEMLIST:
3288 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3289 break;
3290 case FILE_EXCLUSIVE_CPULIST:
3291 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3292 break;
3293 case FILE_EFFECTIVE_XCPULIST:
3294 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3295 break;
3296 case FILE_SUBPARTS_CPULIST:
3297 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3298 break;
3299 case FILE_ISOLATED_CPULIST:
3300 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3301 break;
3302 default:
3303 ret = -EINVAL;
3304 }
3305
3306 spin_unlock_irq(&callback_lock);
3307 return ret;
3308 }
3309
3310 static int cpuset_partition_show(struct seq_file *seq, void *v)
3311 {
3312 struct cpuset *cs = css_cs(seq_css(seq));
3313 const char *err, *type = NULL;
3314
3315 switch (cs->partition_root_state) {
3316 case PRS_ROOT:
3317 seq_puts(seq, "root\n");
3318 break;
3319 case PRS_ISOLATED:
3320 seq_puts(seq, "isolated\n");
3321 break;
3322 case PRS_MEMBER:
3323 seq_puts(seq, "member\n");
3324 break;
3325 case PRS_INVALID_ROOT:
3326 type = "root";
3327 fallthrough;
3328 case PRS_INVALID_ISOLATED:
3329 if (!type)
3330 type = "isolated";
3331 err = perr_strings[READ_ONCE(cs->prs_err)];
3332 if (err)
3333 seq_printf(seq, "%s invalid (%s)\n", type, err);
3334 else
3335 seq_printf(seq, "%s invalid\n", type);
3336 break;
3337 }
3338 return 0;
3339 }
3340
3341 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3342 size_t nbytes, loff_t off)
3343 {
3344 struct cpuset *cs = css_cs(of_css(of));
3345 int val;
3346 int retval = -ENODEV;
3347
3348 buf = strstrip(buf);
3349
3350 if (!strcmp(buf, "root"))
3351 val = PRS_ROOT;
3352 else if (!strcmp(buf, "member"))
3353 val = PRS_MEMBER;
3354 else if (!strcmp(buf, "isolated"))
3355 val = PRS_ISOLATED;
3356 else
3357 return -EINVAL;
3358
3359 cpuset_full_lock();
3360 if (is_cpuset_online(cs))
3361 retval = update_prstate(cs, val);
3362 cpuset_update_sd_hk_unlock();
3363 return retval ?: nbytes;
3364 }
3365
3366 /*
3367 * This is currently a minimal set for the default hierarchy. It can be
3368 * expanded later on by migrating more features and control files from v1.
3369 */
3370 static struct cftype dfl_files[] = {
3371 {
3372 .name = "cpus",
3373 .seq_show = cpuset_common_seq_show,
3374 .write = cpuset_write_resmask,
3375 .max_write_len = (100U + 6 * NR_CPUS),
3376 .private = FILE_CPULIST,
3377 .flags = CFTYPE_NOT_ON_ROOT,
3378 },
3379
3380 {
3381 .name = "mems",
3382 .seq_show = cpuset_common_seq_show,
3383 .write = cpuset_write_resmask,
3384 .max_write_len = (100U + 6 * MAX_NUMNODES),
3385 .private = FILE_MEMLIST,
3386 .flags = CFTYPE_NOT_ON_ROOT,
3387 },
3388
3389 {
3390 .name = "cpus.effective",
3391 .seq_show = cpuset_common_seq_show,
3392 .private = FILE_EFFECTIVE_CPULIST,
3393 },
3394
3395 {
3396 .name = "mems.effective",
3397 .seq_show = cpuset_common_seq_show,
3398 .private = FILE_EFFECTIVE_MEMLIST,
3399 },
3400
3401 {
3402 .name = "cpus.partition",
3403 .seq_show = cpuset_partition_show,
3404 .write = cpuset_partition_write,
3405 .private = FILE_PARTITION_ROOT,
3406 .flags = CFTYPE_NOT_ON_ROOT,
3407 .file_offset = offsetof(struct cpuset, partition_file),
3408 },
3409
3410 {
3411 .name = "cpus.exclusive",
3412 .seq_show = cpuset_common_seq_show,
3413 .write = cpuset_write_resmask,
3414 .max_write_len = (100U + 6 * NR_CPUS),
3415 .private = FILE_EXCLUSIVE_CPULIST,
3416 .flags = CFTYPE_NOT_ON_ROOT,
3417 },
3418
3419 {
3420 .name = "cpus.exclusive.effective",
3421 .seq_show = cpuset_common_seq_show,
3422 .private = FILE_EFFECTIVE_XCPULIST,
3423 .flags = CFTYPE_NOT_ON_ROOT,
3424 },
3425
3426 {
3427 .name = "cpus.subpartitions",
3428 .seq_show = cpuset_common_seq_show,
3429 .private = FILE_SUBPARTS_CPULIST,
3430 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3431 },
3432
3433 {
3434 .name = "cpus.isolated",
3435 .seq_show = cpuset_common_seq_show,
3436 .private = FILE_ISOLATED_CPULIST,
3437 .flags = CFTYPE_ONLY_ON_ROOT,
3438 },
3439
3440 { } /* terminate */
3441 };
3442
3443
3444 /**
3445 * cpuset_css_alloc - Allocate a cpuset css
3446 * @parent_css: Parent css of the control group that the new cpuset will be
3447 * part of
3448 * Return: cpuset css on success, ERR_PTR(-ENOMEM) on failure.
3449 *
3450 * For a non-NULL @parent_css, allocate and initialize a new cpuset css;
3451 * otherwise return the top cpuset css.
3452 */
3453 static struct cgroup_subsys_state *
3454 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3455 {
3456 struct cpuset *cs;
3457
3458 if (!parent_css)
3459 return &top_cpuset.css;
3460
3461 cs = dup_or_alloc_cpuset(NULL);
3462 if (!cs)
3463 return ERR_PTR(-ENOMEM);
3464
3465 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3466 cpuset1_init(cs);
3467
3468 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3469 if (cpuset_v2())
3470 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3471
3472 return &cs->css;
3473 }
3474
3475 static int cpuset_css_online(struct cgroup_subsys_state *css)
3476 {
3477 struct cpuset *cs = css_cs(css);
3478 struct cpuset *parent = parent_cs(cs);
3479
3480 if (!parent)
3481 return 0;
3482
3483 cpuset_full_lock();
3484 /*
3485 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3486 */
3487 if (cpuset_v2() && !is_sched_load_balance(parent))
3488 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3489
3490 cpuset_inc();
3491
3492 spin_lock_irq(&callback_lock);
3493 if (is_in_v2_mode()) {
3494 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3495 cs->effective_mems = parent->effective_mems;
3496 }
3497 spin_unlock_irq(&callback_lock);
3498 cpuset1_online_css(css);
3499
3500 cpuset_full_unlock();
3501 return 0;
3502 }
3503
3504 /*
3505 * If the cpuset being removed has its flag 'sched_load_balance'
3506 * enabled, then simulate turning sched_load_balance off, which
3507 * will call rebuild_sched_domains_locked(). That is not needed
3508 * in the default hierarchy where only changes in partition
3509 * will cause repartitioning.
3510 */
3511 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3512 {
3513 struct cpuset *cs = css_cs(css);
3514
3515 cpuset_full_lock();
3516 if (!cpuset_v2() && is_sched_load_balance(cs))
3517 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3518
3519 cpuset_dec();
3520 cpuset_full_unlock();
3521 }
3522
3523 /*
3524 * If a dying cpuset has the 'cpus.partition' enabled, turn it off by
3525 * changing it back to member to free its exclusive CPUs back to the pool to
3526 * be used by other online cpusets.
3527 */
3528 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3529 {
3530 struct cpuset *cs = css_cs(css);
3531
3532 cpuset_full_lock();
3533 /* Reset valid partition back to member */
3534 if (is_partition_valid(cs))
3535 update_prstate(cs, PRS_MEMBER);
3536 cpuset_update_sd_hk_unlock();
3537 }
3538
3539 static void cpuset_css_free(struct cgroup_subsys_state *css)
3540 {
3541 struct cpuset *cs = css_cs(css);
3542
3543 free_cpuset(cs);
3544 }
3545
3546 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3547 {
3548 mutex_lock(&cpuset_mutex);
3549 spin_lock_irq(&callback_lock);
3550
3551 if (is_in_v2_mode()) {
3552 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3553 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3554 top_cpuset.mems_allowed = node_possible_map;
3555 } else {
3556 cpumask_copy(top_cpuset.cpus_allowed,
3557 top_cpuset.effective_cpus);
3558 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3559 }
3560
3561 spin_unlock_irq(&callback_lock);
3562 mutex_unlock(&cpuset_mutex);
3563 }
3564
3565 /*
3566 * In case the child is cloned into a cpuset different from its parent,
3567 * additional checks are done to see if the move is allowed.
3568 */
3569 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3570 {
3571 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3572 bool same_cs;
3573 int ret;
3574
3575 rcu_read_lock();
3576 same_cs = (cs == task_cs(current));
3577 rcu_read_unlock();
3578
3579 if (same_cs)
3580 return 0;
3581
3582 lockdep_assert_held(&cgroup_mutex);
3583 mutex_lock(&cpuset_mutex);
3584
3585 /* Check to see if task is allowed in the cpuset */
3586 ret = cpuset_can_attach_check(cs);
3587 if (ret)
3588 goto out_unlock;
3589
3590 ret = task_can_attach(task);
3591 if (ret)
3592 goto out_unlock;
3593
3594 ret = security_task_setscheduler(task);
3595 if (ret)
3596 goto out_unlock;
3597
3598 /*
3599 * Mark attach is in progress. This makes validate_change() fail
3600 * changes which zero cpus/mems_allowed.
3601 */
3602 cs->attach_in_progress++;
3603 out_unlock:
3604 mutex_unlock(&cpuset_mutex);
3605 return ret;
3606 }
3607
3608 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3609 {
3610 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3611 bool same_cs;
3612
3613 rcu_read_lock();
3614 same_cs = (cs == task_cs(current));
3615 rcu_read_unlock();
3616
3617 if (same_cs)
3618 return;
3619
3620 dec_attach_in_progress(cs);
3621 }
3622
3623 /*
3624 * Make sure the new task conforms to the current state of its parent,
3625 * which could have been changed by cpuset just after it inherits the
3626 * state from the parent and before it sits on the cgroup's task list.
3627 */
3628 static void cpuset_fork(struct task_struct *task)
3629 {
3630 struct cpuset *cs;
3631 bool same_cs;
3632
3633 rcu_read_lock();
3634 cs = task_cs(task);
3635 same_cs = (cs == task_cs(current));
3636 rcu_read_unlock();
3637
3638 if (same_cs) {
3639 if (cs == &top_cpuset)
3640 return;
3641
3642 set_cpus_allowed_ptr(task, current->cpus_ptr);
3643 task->mems_allowed = current->mems_allowed;
3644 return;
3645 }
3646
3647 /* CLONE_INTO_CGROUP */
3648 mutex_lock(&cpuset_mutex);
3649 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3650 cpuset_attach_task(cs, task);
3651
3652 dec_attach_in_progress_locked(cs);
3653 mutex_unlock(&cpuset_mutex);
3654 }
3655
3656 struct cgroup_subsys cpuset_cgrp_subsys = {
3657 .css_alloc = cpuset_css_alloc,
3658 .css_online = cpuset_css_online,
3659 .css_offline = cpuset_css_offline,
3660 .css_killed = cpuset_css_killed,
3661 .css_free = cpuset_css_free,
3662 .can_attach = cpuset_can_attach,
3663 .cancel_attach = cpuset_cancel_attach,
3664 .attach = cpuset_attach,
3665 .bind = cpuset_bind,
3666 .can_fork = cpuset_can_fork,
3667 .cancel_fork = cpuset_cancel_fork,
3668 .fork = cpuset_fork,
3669 #ifdef CONFIG_CPUSETS_V1
3670 .legacy_cftypes = cpuset1_files,
3671 #endif
3672 .dfl_cftypes = dfl_files,
3673 .early_init = true,
3674 .threaded = true,
3675 };
3676
3677 /**
3678 * cpuset_init - initialize cpusets at system boot
3679 *
3680 * Description: Initialize top_cpuset
3681 **/
3682
3683 int __init cpuset_init(void)
3684 {
3685 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3686 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3687 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3688 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3689 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3690 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3691 BUG_ON(!zalloc_cpumask_var(&isolated_hk_cpus, GFP_KERNEL));
3692
3693 cpumask_setall(top_cpuset.cpus_allowed);
3694 nodes_setall(top_cpuset.mems_allowed);
3695 cpumask_setall(top_cpuset.effective_cpus);
3696 cpumask_setall(top_cpuset.effective_xcpus);
3697 cpumask_setall(top_cpuset.exclusive_cpus);
3698 nodes_setall(top_cpuset.effective_mems);
3699
3700 cpuset1_init(&top_cpuset);
3701
3702 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3703
3704 if (housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
3705 cpumask_andnot(isolated_cpus, cpu_possible_mask,
3706 housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
3707
3708 return 0;
3709 }
3710
3711 static void
3712 hotplug_update_tasks(struct cpuset *cs,
3713 struct cpumask *new_cpus, nodemask_t *new_mems,
3714 bool cpus_updated, bool mems_updated)
3715 {
3716 /* A partition root is allowed to have empty effective cpus */
3717 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3718 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3719 if (nodes_empty(*new_mems))
3720 *new_mems = parent_cs(cs)->effective_mems;
3721
3722 spin_lock_irq(&callback_lock);
3723 cpumask_copy(cs->effective_cpus, new_cpus);
3724 cs->effective_mems = *new_mems;
3725 spin_unlock_irq(&callback_lock);
3726
3727 if (cpus_updated)
3728 cpuset_update_tasks_cpumask(cs, new_cpus);
3729 if (mems_updated)
3730 cpuset_update_tasks_nodemask(cs);
3731 }
3732
3733 void cpuset_force_rebuild(void)
3734 {
3735 force_sd_rebuild = true;
3736 }
3737
3738 /**
3739 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3740 * @cs: cpuset in interest
3741 * @tmp: the tmpmasks structure pointer
3742 *
3743 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3744 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3745 * all its tasks are moved to the nearest ancestor with both resources.
3746 */
3747 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3748 {
3749 static cpumask_t new_cpus;
3750 static nodemask_t new_mems;
3751 bool cpus_updated;
3752 bool mems_updated;
3753 bool remote;
3754 int partcmd = -1;
3755 struct cpuset *parent;
3756 retry:
3757 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3758
3759 mutex_lock(&cpuset_mutex);
3760
3761 /*
3762 * We have raced with task attaching. We wait until attaching
3763 * is finished, so we won't attach a task to an empty cpuset.
3764 */
3765 if (cs->attach_in_progress) {
3766 mutex_unlock(&cpuset_mutex);
3767 goto retry;
3768 }
3769
3770 parent = parent_cs(cs);
3771 compute_effective_cpumask(&new_cpus, cs, parent);
3772 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3773
3774 if (!tmp || !cs->partition_root_state)
3775 goto update_tasks;
3776
3777 /*
3778 * Compute effective_cpus for valid partition root, may invalidate
3779 * child partition roots if necessary.
3780 */
3781 remote = is_remote_partition(cs);
3782 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3783 compute_partition_effective_cpumask(cs, &new_cpus);
3784
3785 if (remote && (cpumask_empty(subpartitions_cpus) ||
3786 (cpumask_empty(&new_cpus) &&
3787 partition_is_populated(cs, NULL)))) {
3788 cs->prs_err = PERR_HOTPLUG;
3789 remote_partition_disable(cs, tmp);
3790 compute_effective_cpumask(&new_cpus, cs, parent);
3791 remote = false;
3792 }
3793
3794 /*
3795 * Force the partition to become invalid if either one of
3796 * the following conditions hold:
3797 * 1) empty effective cpus but not valid empty partition.
3798 * 2) parent is invalid or doesn't grant any cpus to child
3799 * partitions.
3800 * 3) subpartitions_cpus is empty.
3801 */
3802 if (is_local_partition(cs) &&
3803 (!is_partition_valid(parent) ||
3804 tasks_nocpu_error(parent, cs, &new_cpus) ||
3805 cpumask_empty(subpartitions_cpus)))
3806 partcmd = partcmd_invalidate;
3807 /*
3808 * On the other hand, an invalid partition root may be transitioned
3809 * back to a regular one with a non-empty effective xcpus.
3810 */
3811 else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
3812 !cpumask_empty(cs->effective_xcpus))
3813 partcmd = partcmd_update;
3814
3815 if (partcmd >= 0) {
3816 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3817 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3818 compute_partition_effective_cpumask(cs, &new_cpus);
3819 cpuset_force_rebuild();
3820 }
3821 }
3822
3823 update_tasks:
3824 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3825 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3826 if (!cpus_updated && !mems_updated)
3827 goto unlock; /* Hotplug doesn't affect this cpuset */
3828
3829 if (mems_updated)
3830 check_insane_mems_config(&new_mems);
3831
3832 if (is_in_v2_mode())
3833 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3834 cpus_updated, mems_updated);
3835 else
3836 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3837 cpus_updated, mems_updated);
3838
3839 unlock:
3840 mutex_unlock(&cpuset_mutex);
3841 }
3842
3843 /**
3844 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3845 *
3846 * This function is called after either CPU or memory configuration has
3847 * changed and updates cpuset accordingly. The top_cpuset is always
3848 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3849 * order to make cpusets transparent (of no effect) on systems that are
3850 * actively using CPU hotplug but making no active use of cpusets.
3851 *
3852 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3853 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3854 * all descendants.
3855 *
3856 * Note that CPU offlining during suspend is ignored. We don't modify
3857 * cpusets across suspend/resume cycles at all.
3858 *
3859 * CPU / memory hotplug is handled synchronously.
3860 */
3861 static void cpuset_handle_hotplug(void)
3862 {
3863 static DECLARE_WORK(hk_sd_work, hk_sd_workfn);
3864 static cpumask_t new_cpus;
3865 static nodemask_t new_mems;
3866 bool cpus_updated, mems_updated;
3867 bool on_dfl = is_in_v2_mode();
3868 struct tmpmasks tmp, *ptmp = NULL;
3869
3870 if (on_dfl && !alloc_tmpmasks(&tmp))
3871 ptmp = &tmp;
3872
3873 lockdep_assert_cpus_held();
3874 mutex_lock(&cpuset_mutex);
3875
3876 /* fetch the available cpus/mems and find out which changed how */
3877 cpumask_copy(&new_cpus, cpu_active_mask);
3878 new_mems = node_states[N_MEMORY];
3879
3880 /*
3881 * If subpartitions_cpus is populated, it is likely that the check
3882 * below will produce a false positive on cpus_updated when the cpu
3883 * list isn't changed. It is extra work, but it is better to be safe.
3884 */
3885 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3886 !cpumask_empty(subpartitions_cpus);
3887 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3888
3889 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3890 if (cpus_updated) {
3891 cpuset_force_rebuild();
3892 spin_lock_irq(&callback_lock);
3893 if (!on_dfl)
3894 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3895 /*
3896 * Make sure that CPUs allocated to child partitions
3897 * do not show up in effective_cpus. If no CPU is left,
3898 * we clear the subpartitions_cpus & let the child partitions
3899 * fight for the CPUs again.
3900 */
3901 if (!cpumask_empty(subpartitions_cpus)) {
3902 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3903 cpumask_clear(subpartitions_cpus);
3904 } else {
3905 cpumask_andnot(&new_cpus, &new_cpus,
3906 subpartitions_cpus);
3907 }
3908 }
3909 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3910 spin_unlock_irq(&callback_lock);
3911 /* we don't mess with cpumasks of tasks in top_cpuset */
3912 }
3913
3914 /* synchronize mems_allowed to N_MEMORY */
3915 if (mems_updated) {
3916 spin_lock_irq(&callback_lock);
3917 if (!on_dfl)
3918 top_cpuset.mems_allowed = new_mems;
3919 top_cpuset.effective_mems = new_mems;
3920 spin_unlock_irq(&callback_lock);
3921 cpuset_update_tasks_nodemask(&top_cpuset);
3922 }
3923
3924 mutex_unlock(&cpuset_mutex);
3925
3926 /* if cpus or mems changed, we need to propagate to descendants */
3927 if (cpus_updated || mems_updated) {
3928 struct cpuset *cs;
3929 struct cgroup_subsys_state *pos_css;
3930
3931 rcu_read_lock();
3932 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3933 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3934 continue;
3935 rcu_read_unlock();
3936
3937 cpuset_hotplug_update_tasks(cs, ptmp);
3938
3939 rcu_read_lock();
3940 css_put(&cs->css);
3941 }
3942 rcu_read_unlock();
3943 }
3944
3945 /*
3946 * rebuild_sched_domains() will always be called directly if needed
3947 * to make sure that newly added or removed CPU will be reflected in
3948 * the sched domains. However, if isolated partition invalidation
3949 * or recreation is being done (update_housekeeping set), a work item
3950 * will be queued to call housekeeping_update() to update the
3951 * corresponding housekeeping cpumasks after some slight delay.
3952 *
3953 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
3954 * is still pending. Before the pending bit is cleared, the work data
3955 * is copied out and work item dequeued. So it is possible to queue
3956 * the work again before the hk_sd_workfn() is invoked to process the
3957 * previously queued work. Since hk_sd_workfn() doesn't use the work
3958 * item at all, this is not a problem.
3959 */
3960 if (force_sd_rebuild)
3961 rebuild_sched_domains_cpuslocked();
3962 if (update_housekeeping)
3963 queue_work(system_dfl_wq, &hk_sd_work);
3964
3965 free_tmpmasks(ptmp);
3966 }
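/*
 * Illustrative sketch (not part of the kernel source): the work-item
 * coalescing behaviour that cpuset_handle_hotplug() relies on above.
 * queue_work() returns false and does nothing while a work item is still
 * pending, so a burst of hotplug events results in a single deferred
 * housekeeping update.  The names example_workfn/example_work/
 * example_trigger are hypothetical.
 */
#if 0
static void example_workfn(struct work_struct *work)
{
	pr_info("deferred update runs once for a burst of triggers\n");
}

static DECLARE_WORK(example_work, example_workfn);

static void example_trigger(void)
{
	/* The second call is a no-op while the first is still pending. */
	queue_work(system_dfl_wq, &example_work);
	queue_work(system_dfl_wq, &example_work);
}
#endif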
3967
3968 void cpuset_update_active_cpus(void)
3969 {
3970 /*
3971 * We're inside the cpu hotplug critical region which usually nests
3972 * inside cgroup synchronization. Hotplug processing is handled
3973 * synchronously here via cpuset_handle_hotplug().
3974 */
3975 cpuset_handle_hotplug();
3976 }
3977
3978 /*
3979 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3980 * Call this routine anytime after node_states[N_MEMORY] changes.
3981 * See cpuset_update_active_cpus() for CPU hotplug handling.
3982 */
3983 static int cpuset_track_online_nodes(struct notifier_block *self,
3984 unsigned long action, void *arg)
3985 {
3986 cpuset_handle_hotplug();
3987 return NOTIFY_OK;
3988 }
3989
3990 /**
3991 * cpuset_init_smp - initialize cpus_allowed
3992 *
3993 * Description: Finish top cpuset after cpu, node maps are initialized
3994 */
3995 void __init cpuset_init_smp(void)
3996 {
3997 /*
3998 * cpus_allowed/mems_allowed set to v2 values in the initial
3999 * cpuset_bind() call will be reset to v1 values in another
4000 * cpuset_bind() call when v1 cpuset is mounted.
4001 */
4002 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
4003
4004 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
4005 top_cpuset.effective_mems = node_states[N_MEMORY];
4006
4007 hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
4008
4009 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
4010 BUG_ON(!cpuset_migrate_mm_wq);
4011 }
4012
4013 /*
4014 * Return cpus_allowed mask from a task's cpuset.
4015 */
4016 static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4017 {
4018 struct cpuset *cs;
4019
4020 cs = task_cs(tsk);
4021 if (cs != &top_cpuset)
4022 guarantee_active_cpus(tsk, pmask);
4023 /*
4024 * Tasks in the top cpuset won't have their cpumasks updated
4025 * when a hotplug online/offline event happens. So we include all
4026 * offline cpus in the allowed cpu list.
4027 */
4028 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4029 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4030
4031 /*
4032 * We first exclude cpus allocated to partitions. If there is no
4033 * allowable online cpu left, we fall back to all possible cpus.
4034 */
4035 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4036 if (!cpumask_intersects(pmask, cpu_active_mask))
4037 cpumask_copy(pmask, possible_mask);
4038 }
4039 }
4040
4041 /**
4042 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
4043 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4044 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4045 *
4046 * Similar to cpuset_cpus_allowed() except that the caller must have acquired
4047 * cpuset_mutex.
4048 */
4049 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4050 {
4051 lockdep_assert_cpuset_lock_held();
4052 __cpuset_cpus_allowed_locked(tsk, pmask);
4053 }
4054
4055 /**
4056 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4057 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4058 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4059 *
4060 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4061 * attached to the specified @tsk. Guaranteed to return some non-empty
4062 * subset of cpu_active_mask, even if this means going outside the
4063 * task's cpuset, except when the task is in the top cpuset.
4064 **/
4065
4066 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4067 {
4068 unsigned long flags;
4069
4070 spin_lock_irqsave(&callback_lock, flags);
4071 __cpuset_cpus_allowed_locked(tsk, pmask);
4072 spin_unlock_irqrestore(&callback_lock, flags);
4073 }
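/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical way to consume cpuset_cpus_allowed().  The helper always fills
 * @pmask with a usable, non-empty set, so the caller only has to manage
 * the cpumask storage.
 */
#if 0
static int example_report_allowed_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(tsk, mask);
	pr_info("%s/%d may run on %*pbl\n",
		tsk->comm, task_pid_nr(tsk), cpumask_pr_args(mask));

	free_cpumask_var(mask);
	return 0;
}
#endif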
4074
4075 /**
4076 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4077 * @tsk: pointer to task_struct with which the scheduler is struggling
4078 *
4079 * Description: In the case that the scheduler cannot find an allowed cpu in
4080 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4081 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4082 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4083 * This is the absolute last resort for the scheduler and it is only used if
4084 * _every_ other avenue has been traveled.
4085 *
4086 * Returns true if the affinity of @tsk was changed, false otherwise.
4087 **/
4088
4089 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4090 {
4091 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4092 const struct cpumask *cs_mask;
4093 bool changed = false;
4094
4095 rcu_read_lock();
4096 cs_mask = task_cs(tsk)->cpus_allowed;
4097 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4098 set_cpus_allowed_force(tsk, cs_mask);
4099 changed = true;
4100 }
4101 rcu_read_unlock();
4102
4103 /*
4104 * We own tsk->cpus_allowed, nobody can change it under us.
4105 *
4106 * But we used cs && cs->cpus_allowed lockless and thus can
4107 * race with cgroup_attach_task() or update_cpumask() and get
4108 * the wrong tsk->cpus_allowed. However, both cases imply the
4109 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4110 * which takes task_rq_lock().
4111 *
4112 * If we are called after it dropped the lock we must see all
4113 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4114 * set any mask even if it is not right from task_cs() pov,
4115 * the pending set_cpus_allowed_ptr() will fix things.
4116 *
4117 * select_fallback_rq() will fix things up and set cpu_possible_mask
4118 * if required.
4119 */
4120 return changed;
4121 }
4122
4123 void __init cpuset_init_current_mems_allowed(void)
4124 {
4125 nodes_setall(current->mems_allowed);
4126 }
4127
4128 /**
4129 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4130 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4131 *
4132 * Description: Returns the nodemask_t mems_allowed of the cpuset
4133 * attached to the specified @tsk. Guaranteed to return some non-empty
4134 * subset of node_states[N_MEMORY], even if this means going outside the
4135 * task's cpuset.
4136 **/
4137
4138 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4139 {
4140 nodemask_t mask;
4141 unsigned long flags;
4142
4143 spin_lock_irqsave(&callback_lock, flags);
4144 guarantee_online_mems(task_cs(tsk), &mask);
4145 spin_unlock_irqrestore(&callback_lock, flags);
4146
4147 return mask;
4148 }
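/*
 * Illustrative sketch (hypothetical): cpuset_mems_allowed() returns the
 * nodemask by value, so a caller can log or test it without any locking
 * of its own.
 */
#if 0
static void example_show_task_mems(struct task_struct *tsk)
{
	nodemask_t mems = cpuset_mems_allowed(tsk);

	pr_info("%s/%d may allocate on nodes %*pbl\n",
		tsk->comm, task_pid_nr(tsk), nodemask_pr_args(&mems));
}
#endif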
4149
4150 /**
4151 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4152 * @nodemask: the nodemask to be checked
4153 *
4154 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4155 */
4156 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4157 {
4158 return nodes_intersects(*nodemask, current->mems_allowed);
4159 }
4160
4161 /*
4162 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4163 * mem_hardwall ancestor to the specified cpuset. Call holding
4164 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4165 * (an unusual configuration), then returns the root cpuset.
4166 */
4167 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4168 {
4169 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4170 cs = parent_cs(cs);
4171 return cs;
4172 }
4173
4174 /*
4175 * cpuset_current_node_allowed - Can current task allocate on a memory node?
4176 * @node: is this an allowed node?
4177 * @gfp_mask: memory allocation flags
4178 *
4179 * If we're in interrupt, yes, we can always allocate. If @node is set in
4180 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4181 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4182 * yes. If current has access to memory reserves as an oom victim, yes.
4183 * If the current task is PF_EXITING, yes. Otherwise, no.
4184 *
4185 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4186 * and do not allow allocations outside the current task's cpuset
4187 * unless the task has been OOM killed or is exiting.
4188 * GFP_KERNEL allocations are not so marked, so can escape to the
4189 * nearest enclosing hardwalled ancestor cpuset.
4190 *
4191 * Scanning up parent cpusets requires callback_lock. The
4192 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4193 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4194 * current task's mems_allowed came up empty on the first pass over
4195 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4196 * cpuset are short of memory, might require taking the callback_lock.
4197 *
4198 * The first call here from mm/page_alloc:get_page_from_freelist()
4199 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4200 * so no allocation on a node outside the cpuset is allowed (unless
4201 * in interrupt, of course). The PF_EXITING check must therefore
4202 * come before the __GFP_HARDWALL check, otherwise a dying task
4203 * would be blocked on the fast path.
4204 *
4205 * The second pass through get_page_from_freelist() doesn't even call
4206 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4207 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4208 * in alloc_flags. That logic and the checks below have the combined
4209 * effect that:
4210 * in_interrupt - any node ok (current task context irrelevant)
4211 * GFP_ATOMIC - any node ok
4212 * tsk_is_oom_victim - any node ok
4213 * PF_EXITING - any node ok (let dying task exit quickly)
4214 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4215 * GFP_USER - only nodes in the current task's mems_allowed ok.
4216 */
4217 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4218 {
4219 struct cpuset *cs; /* current cpuset ancestors */
4220 bool allowed; /* is allocation on @node allowed? */
4221 unsigned long flags;
4222
4223 if (in_interrupt())
4224 return true;
4225 if (node_isset(node, current->mems_allowed))
4226 return true;
4227 /*
4228 * Allow tasks that have access to memory reserves because they have
4229 * been OOM killed to get memory anywhere.
4230 */
4231 if (unlikely(tsk_is_oom_victim(current)))
4232 return true;
4233 if (current->flags & PF_EXITING) /* Let dying task have memory */
4234 return true;
4235 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4236 return false;
4237
4238 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4239 spin_lock_irqsave(&callback_lock, flags);
4240
4241 cs = nearest_hardwall_ancestor(task_cs(current));
4242 allowed = node_isset(node, cs->mems_allowed);
4243
4244 spin_unlock_irqrestore(&callback_lock, flags);
4245 return allowed;
4246 }
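/*
 * Illustrative sketch (hypothetical caller): how the decision table above
 * might be consulted.  With __GFP_HARDWALL (as in GFP_USER) only nodes in
 * current->mems_allowed pass; a plain GFP_KERNEL request may also land on
 * nodes of the nearest hardwalled ancestor cpuset.
 */
#if 0
static int example_first_allowed_node(gfp_t gfp_mask)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		if (cpuset_current_node_allowed(nid, gfp_mask))
			return nid;

	return NUMA_NO_NODE;
}
#endif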
4247
4248 /**
4249 * cpuset_nodes_allowed - return effective_mems mask from a cgroup cpuset.
4250 * @cgroup: pointer to struct cgroup.
4251 * @mask: pointer to struct nodemask_t to be returned.
4252 *
4253 * Returns effective_mems mask from a cgroup cpuset if it is cgroup v2 and
4254 * has cpuset subsys. Otherwise, returns node_states[N_MEMORY].
4255 *
4256 * This function intentionally avoids taking the cpuset_mutex or callback_lock
4257 * when accessing effective_mems. This is because the obtained effective_mems
4258 * is stale immediately after the query anyway (e.g., effective_mems is updated
4259 * immediately after releasing the lock but before returning).
4260 *
4261 * As a result, returned @mask may be empty because cs->effective_mems can be
4262 * rebound during this call. Besides, nodes in @mask are not guaranteed to be
4263 * online due to hotplug. Callers should check the mask for validity on
4264 * return based on its subsequent use.
4265 **/
4266 void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
4267 {
4268 struct cgroup_subsys_state *css;
4269 struct cpuset *cs;
4270
4271 /*
4272 * In v1, mem_cgroup and cpuset are unlikely to be in the same hierarchy
4273 * and mems_allowed is likely to be empty even if we could get to it,
4274 * so return directly to avoid taking a global lock on the empty check.
4275 */
4276 if (!cgroup || !cpuset_v2()) {
4277 nodes_copy(*mask, node_states[N_MEMORY]);
4278 return;
4279 }
4280
4281 css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
4282 if (!css) {
4283 nodes_copy(*mask, node_states[N_MEMORY]);
4284 return;
4285 }
4286
4287 /*
4288 * The reference taken via cgroup_get_e_css is sufficient to
4289 * protect css, but it does not imply safe accesses to effective_mems.
4290 *
4291 * Normally, accessing effective_mems would require the cpuset_mutex
4292 * or callback_lock - but the correctness of this information is stale
4293 * immediately after the query anyway. We do not acquire the lock
4294 * during this process to save lock contention in exchange for racing
4295 * against mems_allowed rebinds.
4296 */
4297 cs = container_of(css, struct cpuset, css);
4298 nodes_copy(*mask, cs->effective_mems);
4299 css_put(css);
4300 }
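/*
 * Illustrative sketch (hypothetical caller): acting on the staleness
 * caveats documented above - the lockless result may be empty or contain
 * offline nodes, so validate it before use.
 */
#if 0
static void example_cgroup_mems(struct cgroup *cgrp, nodemask_t *out)
{
	cpuset_nodes_allowed(cgrp, out);

	/* Fall back to all memory nodes if the racy snapshot is unusable. */
	if (nodes_empty(*out))
		nodes_copy(*out, node_states[N_MEMORY]);
}
#endif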
4301
4302 /**
4303 * cpuset_spread_node() - On which node to begin search for a page
4304 * @rotor: round robin rotor
4305 *
4306 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4307 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4308 * and if the memory allocation used cpuset_mem_spread_node()
4309 * to determine on which node to start looking, as it will for
4310 * certain page cache or slab cache pages such as used for file
4311 * system buffers and inode caches, then instead of starting on the
4312 * local node to look for a free page, rather spread the starting
4313 * node around the task's mems_allowed nodes.
4314 *
4315 * We don't have to worry about the returned node being offline
4316 * because "it can't happen", and even if it did, it would be ok.
4317 *
4318 * The routines calling guarantee_online_mems() are careful to
4319 * only set nodes in task->mems_allowed that are online. So it
4320 * should not be possible for the following code to return an
4321 * offline node. But if it did, that would be ok, as this routine
4322 * is not returning the node where the allocation must be, only
4323 * the node where the search should start. The zonelist passed to
4324 * __alloc_pages() will include all nodes. If the slab allocator
4325 * is passed an offline node, it will fall back to the local node.
4326 * See kmem_cache_alloc_node().
4327 */
4328 static int cpuset_spread_node(int *rotor)
4329 {
4330 return *rotor = next_node_in(*rotor, current->mems_allowed);
4331 }
4332
4333 /**
4334 * cpuset_mem_spread_node() - On which node to begin search for a file page
4335 */
4336 int cpuset_mem_spread_node(void)
4337 {
4338 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4339 current->cpuset_mem_spread_rotor =
4340 node_random(&current->mems_allowed);
4341
4342 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4343 }
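/*
 * Illustrative sketch (hypothetical, modelled on how page-cache style
 * allocations have used this helper): seed the search for a file page on
 * the spread node when the task's cpuset requests page spreading.  The
 * node only starts the search; the zonelist still allows fallback.
 */
#if 0
static struct page *example_spread_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread())
		return alloc_pages_node(cpuset_mem_spread_node(), gfp, 0);

	return alloc_pages(gfp, 0);
}
#endif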
4344
4345 /**
4346 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4347 * @tsk1: pointer to task_struct of some task.
4348 * @tsk2: pointer to task_struct of some other task.
4349 *
4350 * Description: Return true if @tsk1's mems_allowed intersects the
4351 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4352 * one of the task's memory usage might impact the memory available
4353 * to the other.
4354 **/
4355
4356 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4357 const struct task_struct *tsk2)
4358 {
4359 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4360 }
4361
4362 /**
4363 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4364 *
4365 * Description: Prints current's name, cpuset name, and cached copy of its
4366 * mems_allowed to the kernel log.
4367 */
4368 void cpuset_print_current_mems_allowed(void)
4369 {
4370 struct cgroup *cgrp;
4371
4372 rcu_read_lock();
4373
4374 cgrp = task_cs(current)->css.cgroup;
4375 pr_cont(",cpuset=");
4376 pr_cont_cgroup_name(cgrp);
4377 pr_cont(",mems_allowed=%*pbl",
4378 nodemask_pr_args(&current->mems_allowed));
4379
4380 rcu_read_unlock();
4381 }
4382
4383 /* Display task mems_allowed in /proc/<pid>/status file. */
4384 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4385 {
4386 seq_printf(m, "Mems_allowed:\t%*pb\n",
4387 nodemask_pr_args(&task->mems_allowed));
4388 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4389 nodemask_pr_args(&task->mems_allowed));
4390 }
4391