Lines Matching +full:cs +full:- +full:enable +full:- +full:mask
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
24 #include "cpuset-internal.h"
49 * node binding, add this key to provide a quick low-cost judgment
63 [PERR_ACCESS] = "Enable partition not permitted",
73 * Exclusive CPUs distributed out to local or remote sub-partitions of
95 * - update_partition_sd_lb()
96 * - update_cpumasks_hier()
97 * - cpuset_update_flag()
98 * - cpuset_hotplug_update_tasks()
99 * - cpuset_handle_hotplug()
103 * Note that update_relax_domain_level() in cpuset-v1.c can still call
111 * 0 - member (not a partition root)
112 * 1 - partition root
113 * 2 - partition root without load balancing (isolated)
114 * -1 - invalid partition root
115 * -2 - invalid isolated partition root
117 * There are two types of partitions - local and remote. Local partitions are
131 #define PRS_INVALID_ROOT -1
132 #define PRS_INVALID_ISOLATED -2
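The sign convention above is load-bearing: valid states are positive, and invalidation simply negates the state, so an invalid partition still remembers whether it was a plain or an isolated root and can be restored by negating again (see make_partition_invalid() below and the `new_prs = -old_prs` sites later in this listing). A minimal userspace model of that encoding; everything here is a toy sketch, not kernel code:

#include <assert.h>

/* Toy model of the partition_root_state encoding shown above. */
enum prs {
	PRS_MEMBER           = 0,
	PRS_ROOT             = 1,
	PRS_ISOLATED         = 2,
	PRS_INVALID_ROOT     = -1,
	PRS_INVALID_ISOLATED = -2,
};

static int prs_is_valid(int s)   { return s > 0; }
static int prs_is_invalid(int s) { return s < 0; }

/* Mirrors make_partition_invalid(): negation preserves the type. */
static int prs_invalidate(int s) { return s > 0 ? -s : s; }

int main(void)
{
	int s = PRS_ISOLATED;

	s = prs_invalidate(s);
	assert(s == PRS_INVALID_ISOLATED && prs_is_invalid(s));
	/* Negating again restores the original valid state. */
	assert(-s == PRS_ISOLATED && prs_is_valid(-s));
	return 0;
}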
150 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
152 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
157 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
159 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
162 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
164 return cs->partition_root_state > 0; in is_partition_valid()
167 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
169 return cs->partition_root_state < 0; in is_partition_invalid()
175 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
177 if (cs->partition_root_state > 0) in make_partition_invalid()
178 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
184 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
186 if (old_prs == cs->partition_root_state) in notify_partition_change()
188 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
191 if (is_partition_valid(cs)) in notify_partition_change()
192 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
213 .relax_domain_level = -1,
218 * There are two global locks guarding cpuset structures - cpuset_mutex and
222 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
239 * If a task is only holding callback_lock, then it has read-only
247 * small pieces of code, such as when reading out possibly multi-word
291 * decrease cs->attach_in_progress.
292 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
294 static inline void dec_attach_in_progress_locked(struct cpuset *cs) in dec_attach_in_progress_locked() argument
298 cs->attach_in_progress--; in dec_attach_in_progress_locked()
299 if (!cs->attach_in_progress) in dec_attach_in_progress_locked()
303 static inline void dec_attach_in_progress(struct cpuset *cs) in dec_attach_in_progress() argument
306 dec_attach_in_progress_locked(cs); in dec_attach_in_progress()
327 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
331 * partition_is_populated - check if partition has tasks
332 * @cs: partition root to be checked
336 * It is assumed that @cs is a valid partition root. @excluded_child should
337 * be non-NULL when this cpuset is going to become a partition itself.
339 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
345 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
347 if (!excluded_child && !cs->nr_subparts) in partition_is_populated()
348 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
351 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
356 if (cgroup_is_populated(child->css.cgroup)) { in partition_is_populated()
371 * One way or another, we guarantee to return some non-empty subset
380 struct cpuset *cs; in guarantee_active_cpus() local
386 cs = task_cs(tsk); in guarantee_active_cpus()
388 while (!cpumask_intersects(cs->effective_cpus, pmask)) in guarantee_active_cpus()
389 cs = parent_cs(cs); in guarantee_active_cpus()
391 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_active_cpus()
401 * One way or another, we guarantee to return some non-empty subset
406 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
408 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
409 cs = parent_cs(cs); in guarantee_online_mems()
410 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
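Both guarantee_active_cpus() above and guarantee_online_mems() here rest on the same termination argument: the top cpuset's effective mask always intersects the online set, so walking up the parent chain must eventually find a non-empty intersection. A toy userspace model with 64-bit integers standing in for cpumasks/nodemasks (tree shape and mask values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Toy cpuset: effective mask plus parent pointer; the root is its own parent. */
struct node {
	uint64_t effective;
	struct node *parent;
};

/* Walk up until the effective mask intersects `online`, then return the
 * intersection. Mirrors the loop in guarantee_online_mems(). */
static uint64_t guarantee_online(struct node *n, uint64_t online)
{
	while (!(n->effective & online) && n->parent != n)
		n = n->parent;
	return n->effective & online;
}

int main(void)
{
	struct node root  = { 0xff, &root };   /* CPUs 0-7 */
	struct node child = { 0x30, &root };   /* CPUs 4-5 */
	uint64_t online   = 0x0f;              /* only CPUs 0-3 online */

	/* child has no online CPU, so the root's mask is used instead. */
	printf("%#llx\n", (unsigned long long)guarantee_online(&child, online));
	return 0;
}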
414 * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
415 * @cs: the cpuset that has cpumasks to be allocated.
417 * Return: 0 if successful, -ENOMEM otherwise.
419 * Only one of the two input arguments should be non-NULL.
421 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
425 if (cs) { in alloc_cpumasks()
426 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
427 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
428 pmask3 = &cs->effective_xcpus; in alloc_cpumasks()
429 pmask4 = &cs->exclusive_cpus; in alloc_cpumasks()
431 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
432 pmask2 = &tmp->addmask; in alloc_cpumasks()
433 pmask3 = &tmp->delmask; in alloc_cpumasks()
438 return -ENOMEM; in alloc_cpumasks()
458 return -ENOMEM; in alloc_cpumasks()
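The allocation calls between the two -ENOMEM returns are elided from this listing (file lines 439-457); the usual shape is a chain of allocations with goto-based unwinding, so a later failure frees exactly the buffers already obtained. A hedged userspace reconstruction of that idiom, with malloc() standing in for alloc_cpumask_var():

#include <stdlib.h>

/* Sketch of the goto-unwind idiom behind alloc_cpumasks(): allocate several
 * buffers and, on any failure, free only the ones already obtained before
 * returning an error (-1 here in place of -ENOMEM). */
static int alloc_three(char **p1, char **p2, char **p3)
{
	if (!(*p1 = malloc(64)))
		return -1;
	if (!(*p2 = malloc(64)))
		goto free_one;
	if (!(*p3 = malloc(64)))
		goto free_two;
	return 0;

free_two:
	free(*p2);
free_one:
	free(*p1);
	return -1;
}

int main(void)
{
	char *a, *b, *c;

	if (alloc_three(&a, &b, &c) == 0) {
		free(c); free(b); free(a);
	}
	return 0;
}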
462 * free_cpumasks - free cpumasks in a cpuset or a tmpmasks structure
463 * @cs: the cpuset that has cpumasks to be freed.
466 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
468 if (cs) { in free_cpumasks()
469 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
470 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
471 free_cpumask_var(cs->effective_xcpus); in free_cpumasks()
472 free_cpumask_var(cs->exclusive_cpus); in free_cpumasks()
475 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
476 free_cpumask_var(tmp->addmask); in free_cpumasks()
477 free_cpumask_var(tmp->delmask); in free_cpumasks()
482 * alloc_trial_cpuset - allocate a trial cpuset
483 * @cs: the cpuset that the trial cpuset duplicates
485 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
489 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
498 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
499 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
500 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); in alloc_trial_cpuset()
501 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); in alloc_trial_cpuset()
506 * free_cpuset - free the cpuset
507 * @cs: the cpuset to be freed
509 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
511 free_cpumasks(cs, NULL); in free_cpuset()
512 kfree(cs); in free_cpuset()
516 static inline struct cpumask *user_xcpus(struct cpuset *cs) in user_xcpus() argument
518 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed in user_xcpus()
519 : cs->exclusive_cpus; in user_xcpus()
522 static inline bool xcpus_empty(struct cpuset *cs) in xcpus_empty() argument
524 return cpumask_empty(cs->cpus_allowed) && in xcpus_empty()
525 cpumask_empty(cs->exclusive_cpus); in xcpus_empty()
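user_xcpus() encodes the precedence rule used throughout the partition code: an explicitly written exclusive_cpus list wins, otherwise cpus_allowed is treated as the user's exclusive request. A tiny userspace model (struct and values are illustrative):

#include <stdint.h>
#include <assert.h>

struct cs_model {
	uint64_t cpus_allowed;
	uint64_t exclusive_cpus;
};

/* Mirrors user_xcpus(): exclusive_cpus if non-empty, else cpus_allowed. */
static uint64_t user_xcpus_model(const struct cs_model *cs)
{
	return cs->exclusive_cpus ? cs->exclusive_cpus : cs->cpus_allowed;
}

int main(void)
{
	struct cs_model a = { .cpus_allowed = 0x0f, .exclusive_cpus = 0 };
	struct cs_model b = { .cpus_allowed = 0x0f, .exclusive_cpus = 0x03 };

	assert(user_xcpus_model(&a) == 0x0f);  /* falls back to cpus_allowed */
	assert(user_xcpus_model(&b) == 0x03);  /* explicit request wins */
	return 0;
}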
529 * cpusets_are_exclusive() - check if two cpusets are exclusive
544 * validate_change() - Used to validate that any proposed cpuset change
547 * If we replaced the flag and mask values of the current cpuset
552 * 'cur' is the address of an actual, in-use cpuset. Operations
560 * Return 0 if valid, -errno if not.
583 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
586 ret = -ENOSPC; in validate_change()
587 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
588 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
589 cpumask_empty(trial->cpus_allowed)) in validate_change()
591 if (!nodes_empty(cur->mems_allowed) && in validate_change()
592 nodes_empty(trial->mems_allowed)) in validate_change()
605 * for non-isolated partition root. At this point, the target in validate_change()
613 ret = -EBUSY; in validate_change()
615 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial))) in validate_change()
622 ret = -EINVAL; in validate_change()
629 txset = !cpumask_empty(trial->exclusive_cpus); in validate_change()
630 cxset = !cpumask_empty(c->exclusive_cpus); in validate_change()
645 xcpus = trial->exclusive_cpus; in validate_change()
646 acpus = c->cpus_allowed; in validate_change()
648 xcpus = c->exclusive_cpus; in validate_change()
649 acpus = trial->cpus_allowed; in validate_change()
655 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
672 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
678 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
679 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
692 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
706 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
714 * A 'partial partition' is a set of non-overlapping subsets whose
721 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
732 * cp - cpuset pointer, used (together with pos_css) to perform a
733 * top-down scan of all cpusets. For our purposes, rebuilding
736 * csa - (for CpuSet Array) Array of pointers to all the cpusets
743 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
753 * and merging them using a union-find algorithm.
763 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
823 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
825 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
830 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
840 * non-empty effective_cpus will be saved into csn[]. in generate_sched_domains()
842 if ((cp->partition_root_state == PRS_ROOT) && in generate_sched_domains()
843 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
850 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus)) in generate_sched_domains()
863 uf_node_init(&csa[i]->node); in generate_sched_domains()
874 uf_union(&csa[i]->node, &csa[j]->node); in generate_sched_domains()
881 if (uf_find(&csa[i]->node) == &csa[i]->node) in generate_sched_domains()
902 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a in generate_sched_domains()
912 cpumask_and(doms[i], csa[i]->effective_cpus, in generate_sched_domains()
915 cpumask_copy(doms[i], csa[i]->effective_cpus); in generate_sched_domains()
925 if (uf_find(&csa[j]->node) == &csa[i]->node) { in generate_sched_domains()
934 cpumask_or(dp, dp, csa[j]->effective_cpus); in generate_sched_domains()
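Putting the generate_sched_domains() fragments together: every pair of candidate cpusets whose CPUs overlap is merged with uf_union(), and each element that remains its own union-find root contributes one scheduling domain built by OR-ing its members' effective CPUs. A compact userspace model of that merge (masks, set count, and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define N 4

static int parent[N];

static int uf_find(int x)
{
	while (parent[x] != x) {
		parent[x] = parent[parent[x]];  /* path halving */
		x = parent[x];
	}
	return x;
}

static void uf_union(int a, int b)
{
	parent[uf_find(a)] = uf_find(b);
}

int main(void)
{
	/* Effective CPU masks of four cpusets; sets 0 and 1 overlap. */
	uint64_t cpus[N] = { 0x03, 0x06, 0x30, 0xc0 };
	int i, j;

	for (i = 0; i < N; i++)
		parent[i] = i;

	/* Union every overlapping pair, as generate_sched_domains() does. */
	for (i = 0; i < N; i++)
		for (j = i + 1; j < N; j++)
			if (cpus[i] & cpus[j])
				uf_union(i, j);

	/* One domain per union-find root: OR together its members. */
	for (i = 0; i < N; i++) {
		uint64_t dom = 0;

		if (uf_find(i) != i)
			continue;
		for (j = 0; j < N; j++)
			if (uf_find(j) == i)
				dom |= cpus[j];
		printf("domain: %#llx\n", (unsigned long long)dom);
	}
	return 0;
}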
960 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
965 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
968 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
978 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
996 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
998 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
1003 css_get(&cs->css); in dl_rebuild_rd_accounting()
1007 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
1010 css_put(&cs->css); in dl_rebuild_rd_accounting()
1018 * If the flag 'sched_load_balance' of any cpuset with non-empty
1020 * which has that flag enabled, or if any cpuset with a non-empty
1031 struct cpuset *cs; in rebuild_sched_domains_locked() local
1058 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1059 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1063 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1106 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1107 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1108 * @new_cpus: the temp variable for the new effective_cpus mask
1110 * Iterate through each task of @cs updating its cpus_allowed to the
1121 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in cpuset_update_tasks_cpumask() argument
1125 bool top_cs = cs == &top_cpuset; in cpuset_update_tasks_cpumask()
1127 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_cpumask()
1137 if (task->flags & PF_NO_SETAFFINITY) in cpuset_update_tasks_cpumask()
1141 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in cpuset_update_tasks_cpumask()
1149 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1150 * @new_cpus: the temp variable for the new effective_cpus mask
1151 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1157 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1159 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
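compute_effective_cpumask() is the downward counterpart of the ancestor walks earlier: a cpuset's effective CPUs are what it asked for intersected with what its parent effectively has, so restrictions compose along the path from the root. A toy model with 64-bit masks (values illustrative):

#include <stdint.h>
#include <assert.h>

/* Mirrors compute_effective_cpumask():
 * new = cs->cpus_allowed & parent->effective_cpus. */
static uint64_t effective(uint64_t cpus_allowed, uint64_t parent_effective)
{
	return cpus_allowed & parent_effective;
}

int main(void)
{
	uint64_t root = 0xff;                    /* CPUs 0-7 */
	uint64_t mid  = effective(0x3c, root);   /* asks for 2-5, gets 2-5 */
	uint64_t leaf = effective(0xf0, mid);    /* asks for 4-7, gets 4-5 */

	assert(mid == 0x3c && leaf == 0x30);
	return 0;
}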
1166 partcmd_enable, /* Enable partition root */
1167 partcmd_enablei, /* Enable isolated partition root */
1173 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1181 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs) in update_partition_exclusive_flag() argument
1185 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1186 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1)) in update_partition_exclusive_flag()
1188 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1190 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_partition_exclusive_flag()
1202 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) in update_partition_sd_lb() argument
1204 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1209 * If cs is not a valid partition root, the load balance state in update_partition_sd_lb()
1215 new_lb = is_sched_load_balance(parent_cs(cs)); in update_partition_sd_lb()
1217 if (new_lb != !!is_sched_load_balance(cs)) { in update_partition_sd_lb()
1220 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1222 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1230 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1232 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, in tasks_nocpu_error() argument
1236 * A populated partition (cs or parent) can't have empty effective_cpus in tasks_nocpu_error()
1238 return (cpumask_subset(parent->effective_cpus, xcpus) && in tasks_nocpu_error()
1239 partition_is_populated(parent, cs)) || in tasks_nocpu_error()
1241 partition_is_populated(cs, NULL)); in tasks_nocpu_error()
1244 static void reset_partition_data(struct cpuset *cs) in reset_partition_data() argument
1246 struct cpuset *parent = parent_cs(cs); in reset_partition_data()
1253 cs->nr_subparts = 0; in reset_partition_data()
1254 if (cpumask_empty(cs->exclusive_cpus)) { in reset_partition_data()
1255 cpumask_clear(cs->effective_xcpus); in reset_partition_data()
1256 if (is_cpu_exclusive(cs)) in reset_partition_data()
1257 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); in reset_partition_data()
1259 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed)) in reset_partition_data()
1260 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in reset_partition_data()
1264 * isolated_cpus_update - Update the isolated_cpus mask
1279 * partition_xcpus_add - Add new exclusive CPUs to partition
1301 isolcpus_updated = (new_prs != parent->partition_root_state); in partition_xcpus_add()
1303 isolated_cpus_update(parent->partition_root_state, new_prs, in partition_xcpus_add()
1306 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus); in partition_xcpus_add()
1311 * partition_xcpus_del - Remove exclusive CPUs from partition
1332 isolcpus_updated = (old_prs != parent->partition_root_state); in partition_xcpus_del()
1334 isolated_cpus_update(old_prs, parent->partition_root_state, in partition_xcpus_del()
1338 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus); in partition_xcpus_del()
1356 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1367 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1368 * @cs: cpuset
1376 * the given cs is a trial one.
1378 static int compute_effective_exclusive_cpumask(struct cpuset *cs, in compute_effective_exclusive_cpumask() argument
1383 struct cpuset *parent = parent_cs(cs); in compute_effective_exclusive_cpumask()
1388 xcpus = cs->effective_xcpus; in compute_effective_exclusive_cpumask()
1390 cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus); in compute_effective_exclusive_cpumask()
1393 if (!cpumask_empty(cs->exclusive_cpus)) in compute_effective_exclusive_cpumask()
1396 cs = real_cs; in compute_effective_exclusive_cpumask()
1404 if (sibling == cs) in compute_effective_exclusive_cpumask()
1407 if (cpumask_intersects(xcpus, sibling->exclusive_cpus)) { in compute_effective_exclusive_cpumask()
1408 cpumask_andnot(xcpus, xcpus, sibling->exclusive_cpus); in compute_effective_exclusive_cpumask()
1412 if (cpumask_intersects(xcpus, sibling->effective_xcpus)) { in compute_effective_exclusive_cpumask()
1413 cpumask_andnot(xcpus, xcpus, sibling->effective_xcpus); in compute_effective_exclusive_cpumask()
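The sibling loop above implements first-come, first-served exclusivity: any CPU already claimed by a sibling's exclusive_cpus or effective_xcpus is stripped from the mask being computed. Sketched below over a flat array of sibling claims; the helper name and layout are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Strip CPUs already claimed by siblings, mirroring the loop in
 * compute_effective_exclusive_cpumask(). Entry `self` is skipped. */
static uint64_t trim_siblings(uint64_t xcpus, const uint64_t *sib_xcpus,
			      int nsib, int self)
{
	int i;

	for (i = 0; i < nsib; i++) {
		if (i == self)
			continue;
		if (xcpus & sib_xcpus[i])
			xcpus &= ~sib_xcpus[i];
	}
	return xcpus;
}

int main(void)
{
	uint64_t sibs[3] = { 0x0f, 0x30, 0x00 };  /* sibling claims */

	/* cpuset 2 wants CPUs 2-5; siblings already own 0-3 and 4-5. */
	printf("%#llx\n", (unsigned long long)trim_siblings(0x3c, sibs, 3, 2));
	return 0;
}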
1421 static inline bool is_remote_partition(struct cpuset *cs) in is_remote_partition() argument
1423 return !list_empty(&cs->remote_sibling); in is_remote_partition()
1426 static inline bool is_local_partition(struct cpuset *cs) in is_local_partition() argument
1428 return is_partition_valid(cs) && !is_remote_partition(cs); in is_local_partition()
1432 * remote_partition_enable - Enable current cpuset as a remote partition root
1433 * @cs: the cpuset to update
1438 * Enable the current cpuset to become a remote partition root taking CPUs
1441 static int remote_partition_enable(struct cpuset *cs, int new_prs, in remote_partition_enable() argument
1456 * The effective_xcpus mask can contain offline CPUs, but there must in remote_partition_enable()
1462 compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL); in remote_partition_enable()
1463 WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus)); in remote_partition_enable()
1464 if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) || in remote_partition_enable()
1465 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) in remote_partition_enable()
1469 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); in remote_partition_enable()
1470 list_add(&cs->remote_sibling, &remote_children); in remote_partition_enable()
1471 cpumask_copy(cs->effective_xcpus, tmp->new_cpus); in remote_partition_enable()
1475 cs->prs_err = 0; in remote_partition_enable()
1480 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_partition_enable()
1486 * remote_partition_disable - Remove current cpuset from remote partition list
1487 * @cs: the cpuset to update
1494 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) in remote_partition_disable() argument
1498 WARN_ON_ONCE(!is_remote_partition(cs)); in remote_partition_disable()
1499 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_partition_disable()
1502 list_del_init(&cs->remote_sibling); in remote_partition_disable()
1503 isolcpus_updated = partition_xcpus_del(cs->partition_root_state, in remote_partition_disable()
1504 NULL, cs->effective_xcpus); in remote_partition_disable()
1505 if (cs->prs_err) in remote_partition_disable()
1506 cs->partition_root_state = -cs->partition_root_state; in remote_partition_disable()
1508 cs->partition_root_state = PRS_MEMBER; in remote_partition_disable()
1511 compute_effective_exclusive_cpumask(cs, NULL, NULL); in remote_partition_disable()
1512 reset_partition_data(cs); in remote_partition_disable()
1520 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_partition_disable()
1525 * remote_cpus_update - cpus_exclusive change of remote partition
1526 * @cs: the cpuset to be updated
1527 * @xcpus: the new exclusive_cpus mask, if non-NULL
1528 * @excpus: the new effective_xcpus mask
1534 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, in remote_cpus_update() argument
1538 int prs = cs->partition_root_state; in remote_cpus_update()
1541 if (WARN_ON_ONCE(!is_remote_partition(cs))) in remote_cpus_update()
1544 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_cpus_update()
1547 cs->prs_err = PERR_CPUSEMPTY; in remote_cpus_update()
1551 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus); in remote_cpus_update()
1552 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus); in remote_cpus_update()
1560 WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus)); in remote_cpus_update()
1562 cs->prs_err = PERR_ACCESS; in remote_cpus_update()
1563 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || in remote_cpus_update()
1564 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) in remote_cpus_update()
1565 cs->prs_err = PERR_NOCPUS; in remote_cpus_update()
1566 if (cs->prs_err) in remote_cpus_update()
1572 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask); in remote_cpus_update()
1574 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask); in remote_cpus_update()
1577 * update_sibling_cpumasks() below may iterate back to the same cs. in remote_cpus_update()
1579 cpumask_copy(cs->effective_xcpus, excpus); in remote_cpus_update()
1581 cpumask_copy(cs->exclusive_cpus, xcpus); in remote_cpus_update()
1590 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); in remote_cpus_update()
1595 remote_partition_disable(cs, tmp); in remote_cpus_update()
1599 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1601 * @new_cpus: cpu mask
1619 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1620 * @cs: The cpuset that requests change in partition root state
1626 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1628 * effective_xcpus not set) mask of the given cpuset will be taken away from
1633 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1655 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, in update_parent_effective_cpumask() argument
1659 struct cpuset *parent = parent_cs(cs); in update_parent_effective_cpumask()
1666 struct cpumask *xcpus = user_xcpus(cs); in update_parent_effective_cpumask()
1670 WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */ in update_parent_effective_cpumask()
1677 old_prs = new_prs = cs->partition_root_state; in update_parent_effective_cpumask()
1687 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1688 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1690 new_prs = -old_prs; in update_parent_effective_cpumask()
1691 subparts_delta--; in update_parent_effective_cpumask()
1705 if (!newmask && xcpus_empty(cs)) in update_parent_effective_cpumask()
1708 nocpu = tasks_nocpu_error(parent, cs, xcpus); in update_parent_effective_cpumask()
1716 xcpus = tmp->delmask; in update_parent_effective_cpumask()
1717 if (compute_effective_exclusive_cpumask(cs, xcpus, NULL)) in update_parent_effective_cpumask()
1718 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus)); in update_parent_effective_cpumask()
1742 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus in update_parent_effective_cpumask()
1746 cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask); in update_parent_effective_cpumask()
1747 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus)); in update_parent_effective_cpumask()
1759 if (is_partition_valid(cs)) { in update_parent_effective_cpumask()
1760 cpumask_copy(tmp->addmask, cs->effective_xcpus); in update_parent_effective_cpumask()
1762 subparts_delta--; in update_parent_effective_cpumask()
1774 /* Check newmask again to see whether CPUs are available for parent/cs */ in update_parent_effective_cpumask()
1775 nocpu |= tasks_nocpu_error(parent, cs, newmask); in update_parent_effective_cpumask()
1780 * Compute add/delete mask to/from effective_cpus in update_parent_effective_cpumask()
1784 * & parent->effective_xcpus in update_parent_effective_cpumask()
1786 * & parent->effective_xcpus in update_parent_effective_cpumask()
1789 * delmask = newmask & parent->effective_xcpus in update_parent_effective_cpumask()
1793 deleting = cpumask_and(tmp->delmask, in update_parent_effective_cpumask()
1794 newmask, parent->effective_xcpus); in update_parent_effective_cpumask()
1796 cpumask_andnot(tmp->addmask, xcpus, newmask); in update_parent_effective_cpumask()
1797 adding = cpumask_and(tmp->addmask, tmp->addmask, in update_parent_effective_cpumask()
1798 parent->effective_xcpus); in update_parent_effective_cpumask()
1800 cpumask_andnot(tmp->delmask, newmask, xcpus); in update_parent_effective_cpumask()
1801 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_effective_cpumask()
1802 parent->effective_xcpus); in update_parent_effective_cpumask()
1809 cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask); in update_parent_effective_cpumask()
1810 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus)); in update_parent_effective_cpumask()
1818 !cpumask_intersects(tmp->addmask, cpu_active_mask))) { in update_parent_effective_cpumask()
1821 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1822 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1828 * delmask = effective_xcpus & parent->effective_cpus in update_parent_effective_cpumask()
1843 if (is_partition_valid(cs)) in update_parent_effective_cpumask()
1844 adding = cpumask_and(tmp->addmask, in update_parent_effective_cpumask()
1845 xcpus, parent->effective_xcpus); in update_parent_effective_cpumask()
1846 } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) && in update_parent_effective_cpumask()
1847 cpumask_subset(xcpus, parent->effective_xcpus)) { in update_parent_effective_cpumask()
1858 if (child == cs) in update_parent_effective_cpumask()
1860 if (!cpusets_are_exclusive(cs, child)) { in update_parent_effective_cpumask()
1867 deleting = cpumask_and(tmp->delmask, in update_parent_effective_cpumask()
1868 xcpus, parent->effective_cpus); in update_parent_effective_cpumask()
1876 WRITE_ONCE(cs->prs_err, part_error); in update_parent_effective_cpumask()
1883 switch (cs->partition_root_state) { in update_parent_effective_cpumask()
1887 new_prs = -old_prs; in update_parent_effective_cpumask()
1888 subparts_delta--; in update_parent_effective_cpumask()
1894 new_prs = -old_prs; in update_parent_effective_cpumask()
1908 * CPU lists in cs haven't been updated yet. So defer it to later. in update_parent_effective_cpumask()
1911 int err = update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
1926 cs->partition_root_state = new_prs; in update_parent_effective_cpumask()
1928 cs->nr_subparts = 0; in update_parent_effective_cpumask()
1931 * Adding to parent's effective_cpus means deleting CPUs from cs in update_parent_effective_cpumask()
1936 tmp->addmask); in update_parent_effective_cpumask()
1939 tmp->delmask); in update_parent_effective_cpumask()
1942 parent->nr_subparts += subparts_delta; in update_parent_effective_cpumask()
1943 WARN_ON_ONCE(parent->nr_subparts < 0); in update_parent_effective_cpumask()
1949 update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
1952 cpuset_update_tasks_cpumask(parent, tmp->addmask); in update_parent_effective_cpumask()
1953 update_sibling_cpumasks(parent, cs, tmp); in update_parent_effective_cpumask()
1962 update_partition_sd_lb(cs, old_prs); in update_parent_effective_cpumask()
1964 notify_partition_change(cs, old_prs); in update_parent_effective_cpumask()
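For the partcmd_update path above, the parent's gain and loss reduce to set arithmetic (see the comment block around file line 1780): CPUs the partition gives back (addmask, added to the parent) are the old exclusive set minus the new mask, and CPUs it newly takes (delmask, deleted from the parent) are the new mask minus the old set, each clamped to parent->effective_xcpus. A toy model of that arithmetic with 64-bit masks (values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent_xcpus = 0xff;  /* parent->effective_xcpus */
	uint64_t xcpus        = 0x0f;  /* partition's current exclusive CPUs */
	uint64_t newmask      = 0x3c;  /* requested new mask */

	/* Mirrors the partcmd_update computation: CPUs returned to the
	 * parent and CPUs newly taken from it. */
	uint64_t addmask = (xcpus & ~newmask) & parent_xcpus;   /* 0x03 */
	uint64_t delmask = (newmask & ~xcpus) & parent_xcpus;   /* 0x30 */

	printf("add to parent: %#llx, delete from parent: %#llx\n",
	       (unsigned long long)addmask, (unsigned long long)delmask);
	return 0;
}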
1969 * compute_partition_effective_cpumask - compute effective_cpus for partition
1970 * @cs: partition root cpuset
1984 static void compute_partition_effective_cpumask(struct cpuset *cs, in compute_partition_effective_cpumask() argument
1989 bool populated = partition_is_populated(cs, NULL); in compute_partition_effective_cpumask()
1999 compute_effective_exclusive_cpumask(cs, new_ecpus, NULL); in compute_partition_effective_cpumask()
2003 cpuset_for_each_child(child, css, cs) { in compute_partition_effective_cpumask()
2012 child->prs_err = 0; in compute_partition_effective_cpumask()
2013 if (!cpumask_subset(child->effective_xcpus, in compute_partition_effective_cpumask()
2014 cs->effective_xcpus)) in compute_partition_effective_cpumask()
2015 child->prs_err = PERR_INVCPUS; in compute_partition_effective_cpumask()
2017 cpumask_subset(new_ecpus, child->effective_xcpus)) in compute_partition_effective_cpumask()
2018 child->prs_err = PERR_NOCPUS; in compute_partition_effective_cpumask()
2020 if (child->prs_err) { in compute_partition_effective_cpumask()
2021 int old_prs = child->partition_root_state; in compute_partition_effective_cpumask()
2028 cs->nr_subparts--; in compute_partition_effective_cpumask()
2029 child->nr_subparts = 0; in compute_partition_effective_cpumask()
2035 child->effective_xcpus); in compute_partition_effective_cpumask()
2041 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2042 * @cs: the cpuset to consider
2053 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
2062 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
2067 old_prs = new_prs = cp->partition_root_state; in update_cpumasks_hier()
2070 * For child remote partition root (!= cs), we need to call in update_cpumasks_hier()
2074 * remote_cpus_update() will reuse tmp->new_cpus only after in update_cpumasks_hier()
2077 if (remote && (cp != cs)) { in update_cpumasks_hier()
2078 compute_effective_exclusive_cpumask(cp, tmp->new_cpus, NULL); in update_cpumasks_hier()
2079 if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) { in update_cpumasks_hier()
2084 remote_cpus_update(cp, NULL, tmp->new_cpus, tmp); in update_cpumasks_hier()
2088 new_prs = cp->partition_root_state; in update_cpumasks_hier()
2093 compute_partition_effective_cpumask(cp, tmp->new_cpus); in update_cpumasks_hier()
2095 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
2105 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
2111 * If it becomes empty, inherit the effective mask of the in update_cpumasks_hier()
2116 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) in update_cpumasks_hier()
2117 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
2126 if (!cp->partition_root_state && !force && in update_cpumasks_hier()
2127 cpumask_equal(tmp->new_cpus, cp->effective_cpus) && in update_cpumasks_hier()
2137 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
2141 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
2142 switch (parent->partition_root_state) { in update_cpumasks_hier()
2155 new_prs = -cp->partition_root_state; in update_cpumasks_hier()
2156 WRITE_ONCE(cp->prs_err, in update_cpumasks_hier()
2163 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
2173 new_prs = cp->partition_root_state; in update_cpumasks_hier()
2177 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
2178 cp->partition_root_state = new_prs; in update_cpumasks_hier()
2179 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) in update_cpumasks_hier()
2186 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus)) in update_cpumasks_hier()
2187 cpumask_and(cp->effective_xcpus, in update_cpumasks_hier()
2188 cp->cpus_allowed, parent->effective_xcpus); in update_cpumasks_hier()
2196 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
2198 cpuset_update_tasks_cpumask(cp, cp->effective_cpus); in update_cpumasks_hier()
2208 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
2210 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
2214 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
2219 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
2225 css_put(&cp->css); in update_cpumasks_hier()
2234 * update_sibling_cpumasks - Update siblings cpumasks
2236 * @cs: Current cpuset
2239 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
2261 if (sibling == cs) in update_sibling_cpumasks()
2264 compute_effective_cpumask(tmp->new_cpus, sibling, in update_sibling_cpumasks()
2266 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) in update_sibling_cpumasks()
2276 if (!css_tryget_online(&sibling->css)) in update_sibling_cpumasks()
2282 css_put(&sibling->css); in update_sibling_cpumasks()
2288 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2289 * @cs: the cpuset to consider
2293 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
2298 struct cpuset *parent = parent_cs(cs); in update_cpumask()
2301 int old_prs = cs->partition_root_state; in update_cpumask()
2303 /* top_cpuset.cpus_allowed tracks cpu_active_mask; it's read-only */ in update_cpumask()
2304 if (cs == &top_cpuset) in update_cpumask()
2305 return -EACCES; in update_cpumask()
2309 * Since cpulist_parse() fails on an empty mask, we special case in update_cpumask()
2314 cpumask_clear(trialcs->cpus_allowed); in update_cpumask()
2315 if (cpumask_empty(trialcs->exclusive_cpus)) in update_cpumask()
2316 cpumask_clear(trialcs->effective_xcpus); in update_cpumask()
2318 retval = cpulist_parse(buf, trialcs->cpus_allowed); in update_cpumask()
2322 if (!cpumask_subset(trialcs->cpus_allowed, in update_cpumask()
2324 return -EINVAL; in update_cpumask()
2329 * trialcs->effective_xcpus is used as a temporary cpumask in update_cpumask()
2332 trialcs->partition_root_state = PRS_MEMBER; in update_cpumask()
2333 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) in update_cpumask()
2334 compute_effective_exclusive_cpumask(trialcs, NULL, cs); in update_cpumask()
2338 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
2342 return -ENOMEM; in update_cpumask()
2345 if (is_partition_valid(cs) && in update_cpumask()
2346 cpumask_empty(trialcs->effective_xcpus)) { in update_cpumask()
2348 cs->prs_err = PERR_INVCPUS; in update_cpumask()
2349 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { in update_cpumask()
2351 cs->prs_err = PERR_HKEEPING; in update_cpumask()
2352 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_cpumask()
2354 cs->prs_err = PERR_NOCPUS; in update_cpumask()
2362 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2364 retval = validate_change(cs, trialcs); in update_cpumask()
2366 if ((retval == -EINVAL) && cpuset_v2()) { in update_cpumask()
2371 * The -EINVAL error code indicates that partition sibling in update_cpumask()
2383 cpumask_intersects(xcpus, cp->effective_xcpus)) { in update_cpumask()
2396 if (is_partition_valid(cs) || in update_cpumask()
2397 (is_partition_invalid(cs) && !invalidate)) { in update_cpumask()
2398 struct cpumask *xcpus = trialcs->effective_xcpus; in update_cpumask()
2400 if (cpumask_empty(xcpus) && is_partition_invalid(cs)) in update_cpumask()
2401 xcpus = trialcs->cpus_allowed; in update_cpumask()
2406 if (is_remote_partition(cs)) in update_cpumask()
2407 remote_cpus_update(cs, NULL, xcpus, &tmp); in update_cpumask()
2409 update_parent_effective_cpumask(cs, partcmd_invalidate, in update_cpumask()
2412 update_parent_effective_cpumask(cs, partcmd_update, in update_cpumask()
2417 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
2418 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2419 if ((old_prs > 0) && !is_partition_valid(cs)) in update_cpumask()
2420 reset_partition_data(cs); in update_cpumask()
2424 update_cpumasks_hier(cs, &tmp, force); in update_cpumask()
2427 if (cs->partition_root_state) in update_cpumask()
2428 update_partition_sd_lb(cs, old_prs); in update_cpumask()
2435 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2436 * @cs: the cpuset to consider
2440 * The tasks' cpumask will be updated if cs is a valid partition root.
2442 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_exclusive_cpumask() argument
2447 struct cpuset *parent = parent_cs(cs); in update_exclusive_cpumask()
2450 int old_prs = cs->partition_root_state; in update_exclusive_cpumask()
2453 cpumask_clear(trialcs->exclusive_cpus); in update_exclusive_cpumask()
2454 cpumask_clear(trialcs->effective_xcpus); in update_exclusive_cpumask()
2456 retval = cpulist_parse(buf, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2462 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) in update_exclusive_cpumask()
2466 trialcs->partition_root_state = PRS_MEMBER; in update_exclusive_cpumask()
2471 if (compute_effective_exclusive_cpumask(trialcs, NULL, cs)) in update_exclusive_cpumask()
2472 return -EINVAL; in update_exclusive_cpumask()
2479 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2481 retval = validate_change(cs, trialcs); in update_exclusive_cpumask()
2486 return -ENOMEM; in update_exclusive_cpumask()
2489 if (cpumask_empty(trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2491 cs->prs_err = PERR_INVCPUS; in update_exclusive_cpumask()
2492 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2494 cs->prs_err = PERR_HKEEPING; in update_exclusive_cpumask()
2495 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { in update_exclusive_cpumask()
2497 cs->prs_err = PERR_NOCPUS; in update_exclusive_cpumask()
2500 if (is_remote_partition(cs)) { in update_exclusive_cpumask()
2502 remote_partition_disable(cs, &tmp); in update_exclusive_cpumask()
2504 remote_cpus_update(cs, trialcs->exclusive_cpus, in update_exclusive_cpumask()
2505 trialcs->effective_xcpus, &tmp); in update_exclusive_cpumask()
2507 update_parent_effective_cpumask(cs, partcmd_invalidate, in update_exclusive_cpumask()
2510 update_parent_effective_cpumask(cs, partcmd_update, in update_exclusive_cpumask()
2511 trialcs->effective_xcpus, &tmp); in update_exclusive_cpumask()
2515 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2516 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2517 if ((old_prs > 0) && !is_partition_valid(cs)) in update_exclusive_cpumask()
2518 reset_partition_data(cs); in update_exclusive_cpumask()
2526 if (is_partition_valid(cs) || force) in update_exclusive_cpumask()
2527 update_cpumasks_hier(cs, &tmp, force); in update_exclusive_cpumask()
2530 if (cs->partition_root_state) in update_exclusive_cpumask()
2531 update_partition_sd_lb(cs, old_prs); in update_exclusive_cpumask()
2558 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
2559 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
2575 mwork->mm = mm; in cpuset_migrate_mm()
2576 mwork->from = *from; in cpuset_migrate_mm()
2577 mwork->to = *to; in cpuset_migrate_mm()
2578 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
2579 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
2591 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2595 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2606 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
2608 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
2610 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
2612 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
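The two-step update in cpuset_change_task_nodemask() (first OR the new nodes in, only then assign the final mask) guarantees that a lockless reader never sees an empty intermediate nodemask, while the seqcount lets readers detect a concurrent writer and retry. A single-threaded userspace sketch of both sides; the plain counter below only models the kernel's seqcount_t:

#include <stdint.h>
#include <stdio.h>

static unsigned seq;               /* even = stable, odd = write in progress */
static uint64_t mems_allowed = 0x3;

/* Writer: mirrors cpuset_change_task_nodemask(). Grow first, then shrink,
 * so the mask is never empty while a reader might sample it. */
static void change_nodemask(uint64_t newmems)
{
	seq++;                          /* write_seqcount_begin() */
	mems_allowed |= newmems;        /* step 1: union with the new nodes */
	mems_allowed = newmems;         /* step 2: drop the old nodes */
	seq++;                          /* write_seqcount_end() */
}

/* Reader: retry until an even, unchanged sequence brackets the read. */
static uint64_t read_nodemask(void)
{
	unsigned s;
	uint64_t m;

	do {
		s = seq;
		m = mems_allowed;
	} while ((s & 1) || s != seq);
	return m;
}

int main(void)
{
	change_nodemask(0xc);
	printf("%#llx\n", (unsigned long long)read_nodemask());
	return 0;
}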
2621 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2622 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2624 * Iterate through each task of @cs updating its mems_allowed to the
2628 void cpuset_update_tasks_nodemask(struct cpuset *cs) in cpuset_update_tasks_nodemask() argument
2634 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in cpuset_update_tasks_nodemask()
2636 guarantee_online_mems(cs, &newmems); in cpuset_update_tasks_nodemask()
2640 * take while holding tasklist_lock. Forks can happen - the in cpuset_update_tasks_nodemask()
2648 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_nodemask()
2659 migrate = is_memory_migrate(cs); in cpuset_update_tasks_nodemask()
2661 mpol_rebind_mm(mm, &cs->mems_allowed); in cpuset_update_tasks_nodemask()
2663 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in cpuset_update_tasks_nodemask()
2671 * cs->old_mems_allowed. in cpuset_update_tasks_nodemask()
2673 cs->old_mems_allowed = newmems; in cpuset_update_tasks_nodemask()
2680 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2681 * @cs: the cpuset to consider
2691 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2697 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2700 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
2703 * If it becomes empty, inherit the effective mask of the in update_nodemasks_hier()
2707 *new_mems = parent->effective_mems; in update_nodemasks_hier()
2710 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
2715 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
2720 cp->effective_mems = *new_mems; in update_nodemasks_hier()
2724 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
2729 css_put(&cp->css); in update_nodemasks_hier()
2743 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2744 * lock each such task's mm->mmap_lock, scan its VMAs and rebind
2747 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2754 * it's read-only in update_nodemask()
2756 if (cs == &top_cpuset) { in update_nodemask()
2757 retval = -EACCES; in update_nodemask()
2763 * Since nodelist_parse() fails on an empty mask, we special case in update_nodemask()
2768 nodes_clear(trialcs->mems_allowed); in update_nodemask()
2770 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
2774 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
2776 retval = -EINVAL; in update_nodemask()
2781 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2782 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
2785 retval = validate_change(cs, trialcs); in update_nodemask()
2789 check_insane_mems_config(&trialcs->mems_allowed); in update_nodemask()
2792 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2795 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
2796 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2813 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2815 * cs: the cpuset to update
2821 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in cpuset_update_flag() argument
2829 trialcs = alloc_trial_cpuset(cs); in cpuset_update_flag()
2831 return -ENOMEM; in cpuset_update_flag()
2834 set_bit(bit, &trialcs->flags); in cpuset_update_flag()
2836 clear_bit(bit, &trialcs->flags); in cpuset_update_flag()
2838 err = validate_change(cs, trialcs); in cpuset_update_flag()
2842 balance_flag_changed = (is_sched_load_balance(cs) != in cpuset_update_flag()
2845 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in cpuset_update_flag()
2846 || (is_spread_page(cs) != is_spread_page(trialcs))); in cpuset_update_flag()
2849 cs->flags = trialcs->flags; in cpuset_update_flag()
2852 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { in cpuset_update_flag()
2860 cpuset1_update_tasks_flags(cs); in cpuset_update_flag()
2867 * update_prstate - update partition_root_state
2868 * @cs: the cpuset to update
2874 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2876 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2877 struct cpuset *parent = parent_cs(cs); in update_prstate()
2891 return -ENOMEM; in update_prstate()
2893 err = update_partition_exclusive_flag(cs, new_prs); in update_prstate()
2901 if (xcpus_empty(cs)) { in update_prstate()
2914 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) { in update_prstate()
2920 * If the parent is a valid partition, enable a local partition. in update_prstate()
2921 * Otherwise, enable a remote partition. in update_prstate()
2927 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask); in update_prstate()
2929 err = remote_partition_enable(cs, new_prs, &tmpmask); in update_prstate()
2942 if (is_remote_partition(cs)) in update_prstate()
2943 remote_partition_disable(cs, &tmpmask); in update_prstate()
2945 update_parent_effective_cpumask(cs, partcmd_disable, in update_prstate()
2959 new_prs = -new_prs; in update_prstate()
2960 update_partition_exclusive_flag(cs, new_prs); in update_prstate()
2964 cs->partition_root_state = new_prs; in update_prstate()
2965 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2966 if (!is_partition_valid(cs)) in update_prstate()
2967 reset_partition_data(cs); in update_prstate()
2969 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus); in update_prstate()
2974 update_cpumasks_hier(cs, &tmpmask, !new_prs); in update_prstate()
2978 && cpumask_empty(cs->effective_xcpus)); in update_prstate()
2981 update_partition_sd_lb(cs, old_prs); in update_prstate()
2983 notify_partition_change(cs, old_prs); in update_prstate()
2998 static int cpuset_can_attach_check(struct cpuset *cs) in cpuset_can_attach_check() argument
3000 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
3001 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
3002 return -ENOSPC; in cpuset_can_attach_check()
3006 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
3008 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
3009 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
3016 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
3024 cs = css_cs(css); in cpuset_can_attach()
3029 ret = cpuset_can_attach_check(cs); in cpuset_can_attach()
3033 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
3034 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
3053 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
3054 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
3058 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
3061 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
3062 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
3065 reset_migrate_dl_data(cs); in cpuset_can_attach()
3066 ret = -EINVAL; in cpuset_can_attach()
3070 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
3072 reset_migrate_dl_data(cs); in cpuset_can_attach()
3082 cs->attach_in_progress++; in cpuset_can_attach()
3091 struct cpuset *cs; in cpuset_cancel_attach() local
3094 cs = css_cs(css); in cpuset_cancel_attach()
3097 dec_attach_in_progress_locked(cs); in cpuset_cancel_attach()
3099 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
3100 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
3102 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
3103 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
3117 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) in cpuset_attach_task() argument
3121 if (cs != &top_cpuset) in cpuset_attach_task()
3133 cpuset1_update_task_spread_flags(cs, task); in cpuset_attach_task()
3141 struct cpuset *cs; in cpuset_attach() local
3146 cs = css_cs(css); in cpuset_attach()
3150 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
3151 oldcs->effective_cpus); in cpuset_attach()
3152 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
3161 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3165 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
3168 cpuset_attach_task(cs, task); in cpuset_attach()
3176 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3177 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
3194 if (is_memory_migrate(cs)) in cpuset_attach()
3195 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
3203 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
3205 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
3206 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
3207 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
3208 reset_migrate_dl_data(cs); in cpuset_attach()
3211 dec_attach_in_progress_locked(cs); in cpuset_attach()
3222 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
3224 int retval = -ENODEV; in cpuset_write_resmask()
3229 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
3232 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
3234 retval = -ENOMEM; in cpuset_write_resmask()
3238 switch (of_cft(of)->private) { in cpuset_write_resmask()
3240 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3243 retval = update_exclusive_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3246 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
3249 retval = -EINVAL; in cpuset_write_resmask()
3273 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
3274 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
3281 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
3284 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
3287 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
3290 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
3293 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); in cpuset_common_seq_show()
3296 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); in cpuset_common_seq_show()
3305 ret = -EINVAL; in cpuset_common_seq_show()
3314 struct cpuset *cs = css_cs(seq_css(seq)); in cpuset_partition_show() local
3317 switch (cs->partition_root_state) { in cpuset_partition_show()
3333 err = perr_strings[READ_ONCE(cs->prs_err)]; in cpuset_partition_show()
3346 struct cpuset *cs = css_cs(of_css(of)); in cpuset_partition_write() local
3348 int retval = -ENODEV; in cpuset_partition_write()
3359 return -EINVAL; in cpuset_partition_write()
3363 if (is_cpuset_online(cs)) in cpuset_partition_write()
3364 retval = update_prstate(cs, val); in cpuset_partition_write()
3449 * cpuset_css_alloc - Allocate a cpuset css
3452 * Return: cpuset css on success, -ENOMEM on failure.
3454 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3460 struct cpuset *cs; in cpuset_css_alloc() local
3465 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3466 if (!cs) in cpuset_css_alloc()
3467 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3469 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3470 kfree(cs); in cpuset_css_alloc()
3471 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3474 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3475 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3476 cs->relax_domain_level = -1; in cpuset_css_alloc()
3477 INIT_LIST_HEAD(&cs->remote_sibling); in cpuset_css_alloc()
3481 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3483 return &cs->css; in cpuset_css_alloc()
3488 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3489 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3499 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3501 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3503 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3508 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3514 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3515 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3519 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
3525 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
3528 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
3532 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
3545 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3546 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3547 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3548 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3565 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3570 if (!cpuset_v2() && is_sched_load_balance(cs)) in cpuset_css_offline()
3571 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3574 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3587 struct cpuset *cs = css_cs(css); in cpuset_css_killed() local
3593 if (is_partition_valid(cs)) in cpuset_css_killed()
3594 update_prstate(cs, PRS_MEMBER); in cpuset_css_killed()
3603 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3605 free_cpuset(cs); in cpuset_css_free()
3633 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork() local
3638 same_cs = (cs == task_cs(current)); in cpuset_can_fork()
3648 ret = cpuset_can_attach_check(cs); in cpuset_can_fork()
3664 cs->attach_in_progress++; in cpuset_can_fork()
3672 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork() local
3676 same_cs = (cs == task_cs(current)); in cpuset_cancel_fork()
3682 dec_attach_in_progress(cs); in cpuset_cancel_fork()
3692 struct cpuset *cs; in cpuset_fork() local
3696 cs = task_cs(task); in cpuset_fork()
3697 same_cs = (cs == task_cs(current)); in cpuset_fork()
3701 if (cs == &top_cpuset) in cpuset_fork()
3704 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
3705 task->mems_allowed = current->mems_allowed; in cpuset_fork()
3711 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_fork()
3712 cpuset_attach_task(cs, task); in cpuset_fork()
3714 dec_attach_in_progress_locked(cs); in cpuset_fork()
3741 * cpuset_init - initialize cpusets at system boot
3778 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3783 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3784 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3786 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3789 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3790 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3794 cpuset_update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3796 cpuset_update_tasks_nodemask(cs); in hotplug_update_tasks()
3805 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3806 * @cs: cpuset in interest
3809 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3810 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3813 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated, mems_updated, remote;
	int partcmd = -1;
	struct cpuset *parent;
retry:
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);

	/* Retry if we raced with a task attach, so that no task ends up in
	 * a cpuset that is about to be emptied. */
	if (cs->attach_in_progress) {
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}

	parent = parent_cs(cs);
	compute_effective_cpumask(&new_cpus, cs, parent);
	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);

	if (!tmp || !cs->partition_root_state)
		goto update_tasks;

	/* Compute effective cpus for valid (local or remote) partitions. */
	remote = is_remote_partition(cs);
	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
		compute_partition_effective_cpumask(cs, &new_cpus);

	/* An emptied but still populated remote partition must be disabled. */
	if (remote && cpumask_empty(&new_cpus) &&
	    partition_is_populated(cs, NULL)) {
		cs->prs_err = PERR_HOTPLUG;
		remote_partition_disable(cs, tmp);
		compute_effective_cpumask(&new_cpus, cs, parent);
	}

	/*
	 * Force the partition to become invalid if the parent is invalid
	 * or its tasks would be left with no CPU.
	 */
	if (is_local_partition(cs) && (!is_partition_valid(parent) ||
				tasks_nocpu_error(parent, cs, &new_cpus)))
		partcmd = partcmd_invalidate;
	/*
	 * On the other hand, an invalid partition root may be transitioned
	 * back to a regular one with a non-empty effective xcpus.
	 */
	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
		 !cpumask_empty(cs->effective_xcpus))
		partcmd = partcmd_update;

	if (partcmd >= 0) {
		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs))
			compute_partition_effective_cpumask(cs, &new_cpus);
	}

update_tasks:
	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
	...
	if (is_in_v2_mode())
		hotplug_update_tasks(cs, &new_cpus, &new_mems,
				     cpus_updated, mems_updated);
	else
		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
					     cpus_updated, mems_updated);
}
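/*
 * Illustrative user-space sketch (not part of cpuset.c): offlining a CPU
 * through sysfs lets you watch the hotplug path above shrink
 * cpuset.cpus.effective.  Requires root; assumes cgroup2 at /sys/fs/cgroup
 * and that cpu1 exists and may be taken offline.
 */
#include <stdio.h>

static void dump_effective(const char *when)
{
	char buf[256];
	FILE *f = fopen("/sys/fs/cgroup/cpuset.cpus.effective", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", when, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	FILE *f;

	dump_effective("before offline");

	f = fopen("/sys/devices/system/cpu/cpu1/online", "w");
	if (!f)
		return 1;
	fputs("0", f);		/* kicks off the cpuset hotplug handling */
	fclose(f);

	dump_effective("after offline");
	return 0;
}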
/*
 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
 *
 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
 * nodes have gone away, cpuset_hotplug_update_tasks() is run on every
 * descendant:
 */
	struct cpuset *cs;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
		if (cs == &top_cpuset || !css_tryget_online(&cs->css))
			continue;
		rcu_read_unlock();
		cpuset_hotplug_update_tasks(cs, ptmp);
		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
/* cpuset_init_smp - initialize cpus_allowed */
/*
 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Returns the cpus_allowed of the cpuset attached to @tsk, guaranteed
 * to be some non-empty subset of cpu_online_mask, even if this means
 * going outside the tasks cpuset.
 */
	struct cpuset *cs;
	...
	cs = task_cs(tsk);
	if (cs != &top_cpuset)
		guarantee_online_cpus(tsk, pmask);
	/* Fall back to the possible mask for top_cpuset tasks or an empty result. */
	if ((cs == &top_cpuset) || cpumask_empty(pmask))
		cpumask_copy(pmask, task_cpu_possible_mask(tsk));
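/*
 * Illustrative user-space sketch (not part of cpuset.c): the resulting
 * mask is visible through /proc/<pid>/status, whose Cpus_allowed and
 * Cpus_allowed_list fields report exactly this per-task cpumask.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Cpus_allowed", 12))
			fputs(line, stdout);	/* mask and list forms */
	fclose(f);
	return 0;
}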
/*
 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
 * @tsk: pointer to task_struct with which the scheduler is struggling
 *
 * In the case that the scheduler cannot find an allowed cpu in
 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
 * which will not contain a sane cpumask during cases such as cpu hotplugging.
 */
	cs_mask = task_cs(tsk)->cpus_allowed;
	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask))
		do_set_cpus_allowed(tsk, cs_mask);
	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 */
/* cpuset_init_current_mems_allowed() */
	nodes_setall(current->mems_allowed);
/*
 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Returns the mems_allowed of the cpuset attached to @tsk, guaranteed
 * to be some non-empty subset of node_states[N_MEMORY].
 */
	nodemask_t mask;
	...
	guarantee_online_mems(task_cs(tsk), &mask);
	...
	return mask;
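/*
 * Illustrative user-space sketch (not part of cpuset.c): the kernel
 * exposes the same information through get_mempolicy(2) with
 * MPOL_F_MEMS_ALLOWED.  The raw syscall is used so libnuma is not
 * required; the flag value matches <linux/mempolicy.h>.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MPOL_F_MEMS_ALLOWED (1 << 2)	/* from <linux/mempolicy.h> */

int main(void)
{
	unsigned long mask[16] = { 0 };	/* room for 1024 node bits */

	if (syscall(SYS_get_mempolicy, NULL, mask, 8 * sizeof(mask),
		    NULL, MPOL_F_MEMS_ALLOWED))
		perror("get_mempolicy");
	else
		printf("mems_allowed (first word): 0x%lx\n", mask[0]);
	return 0;
}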
/*
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
	return nodes_intersects(*nodemask, current->mems_allowed);
/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor of the specified cpuset, or the root cpuset if
 * no such ancestor exists.  Call holding callback_lock.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}
/*
 * cpuset_current_node_allowed - Can current task allocate on a memory node?
 *
 * Together with the ALLOC_CPUSET checks in the page allocator, the rules
 * work out to:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	tsk_is_oom_victim - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current tasks mems allowed ok.
 */
	struct cpuset *cs;		/* current cpuset ancestors */
	bool allowed;			/* is allocation in zone z allowed? */

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	if (unlikely(tsk_is_oom_victim(current)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);

	return allowed;
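/*
 * The table above, restated as a stand-alone predicate for illustration.
 * This mirrors the order of checks in cpuset_current_node_allowed(); the
 * boolean parameters are hypothetical stand-ins for in_interrupt(),
 * node_isset(), tsk_is_oom_victim(), __GFP_HARDWALL, PF_EXITING and the
 * hardwall-ancestor lookup in the real code.
 */
#include <stdbool.h>

static bool node_allowed_sketch(bool in_irq, bool node_in_mems_allowed,
				bool oom_victim, bool hardwall_req,
				bool exiting, bool node_in_hardwall_ancestor)
{
	if (in_irq)
		return true;			/* any node ok */
	if (node_in_mems_allowed)
		return true;			/* common fast path */
	if (oom_victim)
		return true;			/* OOM victims may go anywhere */
	if (hardwall_req)
		return false;			/* GFP_USER: mems_allowed only */
	if (exiting)
		return true;			/* let a dying task have memory */
	/* GFP_KERNEL: any node in the nearest hardwalled ancestor */
	return node_in_hardwall_ancestor;
}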
/* cpuset_node_allowed() */
	struct cpuset *cs;
	...
	/*
	 * ... without taking cpuset_mutex or callback_lock - node_isset is
	 * atomic and the reference is protected; locking is relaxed here to
	 * avoid taking global locks, while accepting a possibly stale value.
	 */
	cs = container_of(css, struct cpuset, css);
	allowed = node_isset(nid, cs->effective_mems);
/*
 * cpuset_spread_node() - On which node to begin search for a page
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online, so an
 * offline node should never be returned here.
 */
static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}
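/*
 * Illustrative sketch of the rotor: next_node_in() advances and wraps
 * within mems_allowed; the same round-robin, here over an explicit array
 * of allowed node ids.  spread_next() and its parameters are hypothetical.
 */
#include <stddef.h>

static int spread_next(const int *nodes, size_t n, size_t *rotor)
{
	*rotor = (*rotor + 1) % n;	/* advance and wrap, like next_node_in() */
	return nodes[*rotor];
}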
/*
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 */
int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}
/*
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 */
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
/*
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 */
	cgrp = task_cs(current)->css.cgroup;
	pr_cont(",cpuset=");
	pr_cont_cgroup_name(cgrp);
	pr_cont(",mems_allowed=%*pbl",
		nodemask_pr_args(&current->mems_allowed));
/* cpuset_task_status_allowed() */
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));