Lines matching refs:cs — each entry gives the line number in the source file, the matching source line, the enclosing function, and whether cs is declared there as a local variable or a function argument.
150 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
152 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
157 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
159 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
162 static inline bool is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
164 return cs->partition_root_state > 0; in is_partition_valid()
167 static inline bool is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
169 return cs->partition_root_state < 0; in is_partition_invalid()
172 static inline bool cs_is_member(const struct cpuset *cs) in cs_is_member() argument
174 return cs->partition_root_state == PRS_MEMBER; in cs_is_member()
180 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
182 if (cs->partition_root_state > 0) in make_partition_invalid()
183 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
189 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
191 if (old_prs == cs->partition_root_state) in notify_partition_change()
193 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
196 if (is_partition_valid(cs)) in notify_partition_change()
197 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
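
The helpers above imply a sign convention for partition_root_state: PRS_MEMBER is zero, valid partition roots are positive, and invalidation negates the value so the original root type stays recoverable. A minimal standalone C model of that convention follows (illustrative enum values, not the kernel definitions):

	/* Standalone sketch, not kernel code. */
	#include <stdio.h>
	#include <stdbool.h>

	enum { PRS_MEMBER = 0, PRS_ROOT = 1, PRS_ISOLATED = 2 };	/* illustrative values */

	struct cpuset_model { int partition_root_state; };

	static bool is_partition_valid(const struct cpuset_model *cs)
	{
		return cs->partition_root_state > 0;
	}

	static bool is_partition_invalid(const struct cpuset_model *cs)
	{
		return cs->partition_root_state < 0;
	}

	static void make_partition_invalid(struct cpuset_model *cs)
	{
		if (cs->partition_root_state > 0)
			cs->partition_root_state = -cs->partition_root_state;
	}

	int main(void)
	{
		struct cpuset_model cs = { .partition_root_state = PRS_ROOT };

		make_partition_invalid(&cs);
		/* Prints: valid=0 invalid=1 state=-1 */
		printf("valid=%d invalid=%d state=%d\n",
		       is_partition_valid(&cs), is_partition_invalid(&cs),
		       cs.partition_root_state);
		return 0;
	}
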
323 static inline void dec_attach_in_progress_locked(struct cpuset *cs) in dec_attach_in_progress_locked() argument
327 cs->attach_in_progress--; in dec_attach_in_progress_locked()
328 if (!cs->attach_in_progress) in dec_attach_in_progress_locked()
332 static inline void dec_attach_in_progress(struct cpuset *cs) in dec_attach_in_progress() argument
335 dec_attach_in_progress_locked(cs); in dec_attach_in_progress()
359 static inline bool cpuset_is_populated(struct cpuset *cs) in cpuset_is_populated() argument
364 return cgroup_is_populated(cs->css.cgroup) || in cpuset_is_populated()
365 cs->attach_in_progress; in cpuset_is_populated()
382 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
393 if (cs->css.cgroup->nr_populated_csets || in partition_is_populated()
394 cs->attach_in_progress) in partition_is_populated()
398 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in partition_is_populated()
399 if (cp == cs || cp == excluded_child) in partition_is_populated()
431 struct cpuset *cs; in guarantee_active_cpus() local
437 cs = task_cs(tsk); in guarantee_active_cpus()
439 while (!cpumask_intersects(cs->effective_cpus, pmask)) in guarantee_active_cpus()
440 cs = parent_cs(cs); in guarantee_active_cpus()
442 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_active_cpus()
457 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
459 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
460 cs = parent_cs(cs); in guarantee_online_mems()
461 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
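
guarantee_active_cpus() and guarantee_online_mems() above share one pattern: climb toward the root cpuset until the effective mask overlaps the online/active set, then intersect the two. A standalone sketch of that walk-up pattern, using toy bitmasks in place of kernel cpumasks/nodemasks:

	/* Standalone sketch, not kernel code. */
	#include <stdio.h>

	struct node {
		unsigned long effective;	/* toy bitmask standing in for a cpumask */
		struct node *parent;		/* NULL at the root */
	};

	static unsigned long guarantee_online(struct node *n, unsigned long online)
	{
		/* Walk up until the effective mask intersects the online set. */
		while (!(n->effective & online) && n->parent)
			n = n->parent;
		return n->effective & online;
	}

	int main(void)
	{
		struct node root  = { .effective = 0xff, .parent = NULL };
		struct node child = { .effective = 0x30, .parent = &root };

		/* CPUs 4-5 went offline: fall back to the parent's mask -> 0xf */
		printf("%#lx\n", guarantee_online(&child, 0x0f));
		return 0;
	}
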
532 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs) in dup_or_alloc_cpuset() argument
537 trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) : in dup_or_alloc_cpuset()
538 kzalloc(sizeof(*cs), GFP_KERNEL); in dup_or_alloc_cpuset()
556 if (cs) { in dup_or_alloc_cpuset()
557 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in dup_or_alloc_cpuset()
558 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in dup_or_alloc_cpuset()
559 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); in dup_or_alloc_cpuset()
560 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); in dup_or_alloc_cpuset()
570 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
572 free_cpumask_var(cs->cpus_allowed); in free_cpuset()
573 free_cpumask_var(cs->effective_cpus); in free_cpuset()
574 free_cpumask_var(cs->effective_xcpus); in free_cpuset()
575 free_cpumask_var(cs->exclusive_cpus); in free_cpuset()
576 kfree(cs); in free_cpuset()
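
dup_or_alloc_cpuset() and free_cpuset() above support the trial-copy idiom visible later in this listing (cpuset_update_flag(), cpuset_write_resmask()): duplicate the current object, mutate and validate the copy, and commit it back only on success. A standalone sketch of that idiom, with a stand-in for validate_change():

	/* Standalone sketch, not kernel code. */
	#include <stdlib.h>
	#include <string.h>
	#include <errno.h>

	struct conf { unsigned long cpus; };

	static struct conf *dup_or_alloc_conf(const struct conf *c)
	{
		struct conf *trial = malloc(sizeof(*trial));

		if (!trial)
			return NULL;
		if (c)
			memcpy(trial, c, sizeof(*c));	/* duplicate existing object */
		else
			memset(trial, 0, sizeof(*trial));	/* or start from scratch */
		return trial;
	}

	static int update_cpus(struct conf *c, unsigned long new_cpus)
	{
		struct conf *trial = dup_or_alloc_conf(c);
		int ret = 0;

		if (!trial)
			return -ENOMEM;
		trial->cpus = new_cpus;
		if (!trial->cpus)		/* stand-in for validate_change() */
			ret = -EINVAL;
		else
			c->cpus = trial->cpus;	/* commit only when valid */
		free(trial);
		return ret;
	}

	int main(void)
	{
		struct conf c = { .cpus = 0x3 };

		update_cpus(&c, 0xc);	/* accepted */
		update_cpus(&c, 0x0);	/* rejected; c.cpus stays 0xc */
		return c.cpus == 0xc ? 0 : 1;
	}
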
580 static inline struct cpumask *user_xcpus(struct cpuset *cs) in user_xcpus() argument
582 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed in user_xcpus()
583 : cs->exclusive_cpus; in user_xcpus()
586 static inline bool xcpus_empty(struct cpuset *cs) in xcpus_empty() argument
588 return cpumask_empty(cs->cpus_allowed) && in xcpus_empty()
589 cpumask_empty(cs->exclusive_cpus); in xcpus_empty()
1038 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
1043 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
1046 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
1056 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
1074 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
1076 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
1081 css_get(&cs->css); in dl_rebuild_rd_accounting()
1085 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
1088 css_put(&cs->css); in dl_rebuild_rd_accounting()
1109 struct cpuset *cs; in rebuild_sched_domains_locked() local
1136 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1137 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1141 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1199 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in cpuset_update_tasks_cpumask() argument
1203 bool top_cs = cs == &top_cpuset; in cpuset_update_tasks_cpumask()
1205 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_cpumask()
1219 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in cpuset_update_tasks_cpumask()
1235 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1237 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1251 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1259 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs) in update_partition_exclusive_flag() argument
1263 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1264 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1)) in update_partition_exclusive_flag()
1266 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive_flag()
1268 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_partition_exclusive_flag()
1280 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) in update_partition_sd_lb() argument
1282 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1293 new_lb = is_sched_load_balance(parent_cs(cs)); in update_partition_sd_lb()
1295 if (new_lb != !!is_sched_load_balance(cs)) { in update_partition_sd_lb()
1298 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1300 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1310 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, in tasks_nocpu_error() argument
1317 partition_is_populated(parent, cs)) || in tasks_nocpu_error()
1319 partition_is_populated(cs, NULL)); in tasks_nocpu_error()
1322 static void reset_partition_data(struct cpuset *cs) in reset_partition_data() argument
1324 struct cpuset *parent = parent_cs(cs); in reset_partition_data()
1331 if (cpumask_empty(cs->exclusive_cpus)) { in reset_partition_data()
1332 cpumask_clear(cs->effective_xcpus); in reset_partition_data()
1333 if (is_cpu_exclusive(cs)) in reset_partition_data()
1334 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); in reset_partition_data()
1336 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed)) in reset_partition_data()
1337 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in reset_partition_data()
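
compute_effective_cpumask() and reset_partition_data() above encode the effective-CPU rule: a child's effective CPUs are its allowed CPUs restricted to the parent's effective set, and an empty result falls back to inheriting the parent's effective CPUs. A standalone sketch of that rule, assuming the v2-style fallback:

	/* Standalone sketch, not kernel code. */
	#include <stdio.h>

	static unsigned long compute_effective(unsigned long cpus_allowed,
					       unsigned long parent_effective)
	{
		unsigned long eff = cpus_allowed & parent_effective;

		return eff ? eff : parent_effective;	/* v2-style fallback */
	}

	int main(void)
	{
		printf("%#lx\n", compute_effective(0x0c, 0x0f));	/* 0xc */
		printf("%#lx\n", compute_effective(0xf0, 0x0f));	/* 0xf: inherit */
		return 0;
	}
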
1515 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs, in rm_siblings_excl_cpus() argument
1530 if (sibling == cs) in rm_siblings_excl_cpus()
1557 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus) in compute_excpus() argument
1559 struct cpuset *parent = parent_cs(cs); in compute_excpus()
1561 cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus); in compute_excpus()
1563 if (!cpumask_empty(cs->exclusive_cpus)) in compute_excpus()
1566 return rm_siblings_excl_cpus(parent, cs, excpus); in compute_excpus()
1578 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs) in compute_trialcs_excpus() argument
1584 if (cs_is_member(cs)) in compute_trialcs_excpus()
1590 return rm_siblings_excl_cpus(parent, cs, excpus); in compute_trialcs_excpus()
1593 static inline bool is_remote_partition(struct cpuset *cs) in is_remote_partition() argument
1595 return cs->remote_partition; in is_remote_partition()
1598 static inline bool is_local_partition(struct cpuset *cs) in is_local_partition() argument
1600 return is_partition_valid(cs) && !is_remote_partition(cs); in is_local_partition()
1613 static int remote_partition_enable(struct cpuset *cs, int new_prs, in remote_partition_enable() argument
1632 compute_excpus(cs, tmp->new_cpus); in remote_partition_enable()
1644 cs->remote_partition = true; in remote_partition_enable()
1645 cpumask_copy(cs->effective_xcpus, tmp->new_cpus); in remote_partition_enable()
1649 cs->prs_err = 0; in remote_partition_enable()
1668 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) in remote_partition_disable() argument
1670 WARN_ON_ONCE(!is_remote_partition(cs)); in remote_partition_disable()
1671 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_partition_disable()
1674 cs->remote_partition = false; in remote_partition_disable()
1675 partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus); in remote_partition_disable()
1676 if (cs->prs_err) in remote_partition_disable()
1677 cs->partition_root_state = -cs->partition_root_state; in remote_partition_disable()
1679 cs->partition_root_state = PRS_MEMBER; in remote_partition_disable()
1682 compute_excpus(cs, cs->effective_xcpus); in remote_partition_disable()
1683 reset_partition_data(cs); in remote_partition_disable()
1705 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, in remote_cpus_update() argument
1709 int prs = cs->partition_root_state; in remote_cpus_update()
1711 if (WARN_ON_ONCE(!is_remote_partition(cs))) in remote_cpus_update()
1714 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); in remote_cpus_update()
1717 cs->prs_err = PERR_CPUSEMPTY; in remote_cpus_update()
1721 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus); in remote_cpus_update()
1722 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus); in remote_cpus_update()
1732 cs->prs_err = PERR_ACCESS; in remote_cpus_update()
1735 cs->prs_err = PERR_NOCPUS; in remote_cpus_update()
1738 cs->prs_err = PERR_HKEEPING; in remote_cpus_update()
1739 if (cs->prs_err) in remote_cpus_update()
1752 cpumask_copy(cs->effective_xcpus, excpus); in remote_cpus_update()
1754 cpumask_copy(cs->exclusive_cpus, xcpus); in remote_cpus_update()
1768 remote_partition_disable(cs, tmp); in remote_cpus_update()
1808 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, in update_parent_effective_cpumask() argument
1812 struct cpuset *parent = parent_cs(cs); in update_parent_effective_cpumask()
1817 struct cpumask *xcpus = user_xcpus(cs); in update_parent_effective_cpumask()
1822 WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */ in update_parent_effective_cpumask()
1829 old_prs = new_prs = cs->partition_root_state; in update_parent_effective_cpumask()
1832 if (is_partition_invalid(cs)) in update_parent_effective_cpumask()
1856 if (!newmask && xcpus_empty(cs)) in update_parent_effective_cpumask()
1859 nocpu = tasks_nocpu_error(parent, cs, xcpus); in update_parent_effective_cpumask()
1868 if (compute_excpus(cs, xcpus)) in update_parent_effective_cpumask()
1869 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus)); in update_parent_effective_cpumask()
1886 if (tasks_nocpu_error(parent, cs, xcpus)) in update_parent_effective_cpumask()
1909 if (is_partition_valid(cs)) { in update_parent_effective_cpumask()
1910 cpumask_copy(tmp->addmask, cs->effective_xcpus); in update_parent_effective_cpumask()
1924 nocpu |= tasks_nocpu_error(parent, cs, newmask); in update_parent_effective_cpumask()
1941 if (is_partition_invalid(cs)) { in update_parent_effective_cpumask()
1960 if (is_partition_valid(cs) && (old_prs != parent_prs)) { in update_parent_effective_cpumask()
2009 if (is_partition_valid(cs)) in update_parent_effective_cpumask()
2012 } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) && in update_parent_effective_cpumask()
2024 if (child == cs) in update_parent_effective_cpumask()
2026 if (!cpusets_are_exclusive(cs, child)) { in update_parent_effective_cpumask()
2042 WRITE_ONCE(cs->prs_err, part_error); in update_parent_effective_cpumask()
2049 switch (cs->partition_root_state) { in update_parent_effective_cpumask()
2073 int err = update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
2088 cs->partition_root_state = new_prs; in update_parent_effective_cpumask()
2103 update_partition_exclusive_flag(cs, new_prs); in update_parent_effective_cpumask()
2107 update_sibling_cpumasks(parent, cs, tmp); in update_parent_effective_cpumask()
2116 update_partition_sd_lb(cs, old_prs); in update_parent_effective_cpumask()
2118 notify_partition_change(cs, old_prs); in update_parent_effective_cpumask()
2138 static void compute_partition_effective_cpumask(struct cpuset *cs, in compute_partition_effective_cpumask() argument
2143 bool populated = partition_is_populated(cs, NULL); in compute_partition_effective_cpumask()
2153 compute_excpus(cs, new_ecpus); in compute_partition_effective_cpumask()
2157 cpuset_for_each_child(child, css, cs) { in compute_partition_effective_cpumask()
2168 cs->effective_xcpus)) in compute_partition_effective_cpumask()
2205 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
2213 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
2228 if (remote && (cp != cs)) { in update_cpumasks_hier()
2292 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
2330 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) in update_cpumasks_hier()
2387 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
2409 if (sibling == cs) in update_sibling_cpumasks()
2458 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs) in validate_partition() argument
2460 struct cpuset *parent = parent_cs(cs); in validate_partition()
2472 if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) in validate_partition()
2478 static int cpus_allowed_validate_change(struct cpuset *cs, struct cpuset *trialcs, in cpus_allowed_validate_change() argument
2482 struct cpuset *parent = parent_cs(cs); in cpus_allowed_validate_change()
2484 retval = validate_change(cs, trialcs); in cpus_allowed_validate_change()
2524 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs, in partition_cpus_change() argument
2529 if (cs_is_member(cs)) in partition_cpus_change()
2532 prs_err = validate_partition(cs, trialcs); in partition_cpus_change()
2534 trialcs->prs_err = cs->prs_err = prs_err; in partition_cpus_change()
2536 if (is_remote_partition(cs)) { in partition_cpus_change()
2538 remote_partition_disable(cs, tmp); in partition_cpus_change()
2540 remote_cpus_update(cs, trialcs->exclusive_cpus, in partition_cpus_change()
2544 update_parent_effective_cpumask(cs, partcmd_invalidate, in partition_cpus_change()
2547 update_parent_effective_cpumask(cs, partcmd_update, in partition_cpus_change()
2558 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
2564 int old_prs = cs->partition_root_state; in update_cpumask()
2571 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
2577 compute_trialcs_excpus(trialcs, cs); in update_cpumask()
2580 retval = cpus_allowed_validate_change(cs, trialcs, &tmp); in update_cpumask()
2588 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2590 partition_cpus_change(cs, trialcs, &tmp); in update_cpumask()
2593 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
2594 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_cpumask()
2595 if ((old_prs > 0) && !is_partition_valid(cs)) in update_cpumask()
2596 reset_partition_data(cs); in update_cpumask()
2600 update_cpumasks_hier(cs, &tmp, force); in update_cpumask()
2603 if (cs->partition_root_state) in update_cpumask()
2604 update_partition_sd_lb(cs, old_prs); in update_cpumask()
2618 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_exclusive_cpumask() argument
2624 int old_prs = cs->partition_root_state; in update_exclusive_cpumask()
2631 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) in update_exclusive_cpumask()
2638 if (compute_trialcs_excpus(trialcs, cs)) in update_exclusive_cpumask()
2645 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2647 retval = validate_change(cs, trialcs); in update_exclusive_cpumask()
2655 partition_cpus_change(cs, trialcs, &tmp); in update_exclusive_cpumask()
2658 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); in update_exclusive_cpumask()
2659 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); in update_exclusive_cpumask()
2660 if ((old_prs > 0) && !is_partition_valid(cs)) in update_exclusive_cpumask()
2661 reset_partition_data(cs); in update_exclusive_cpumask()
2669 if (is_partition_valid(cs) || force) in update_exclusive_cpumask()
2670 update_cpumasks_hier(cs, &tmp, force); in update_exclusive_cpumask()
2673 if (cs->partition_root_state) in update_exclusive_cpumask()
2674 update_partition_sd_lb(cs, old_prs); in update_exclusive_cpumask()
2786 void cpuset_update_tasks_nodemask(struct cpuset *cs) in cpuset_update_tasks_nodemask() argument
2792 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in cpuset_update_tasks_nodemask()
2794 guarantee_online_mems(cs, &newmems); in cpuset_update_tasks_nodemask()
2806 css_task_iter_start(&cs->css, 0, &it); in cpuset_update_tasks_nodemask()
2817 migrate = is_memory_migrate(cs); in cpuset_update_tasks_nodemask()
2819 mpol_rebind_mm(mm, &cs->mems_allowed); in cpuset_update_tasks_nodemask()
2821 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in cpuset_update_tasks_nodemask()
2831 cs->old_mems_allowed = newmems; in cpuset_update_tasks_nodemask()
2849 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2855 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2905 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2923 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) in update_nodemask()
2926 retval = validate_change(cs, trialcs); in update_nodemask()
2933 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2937 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2961 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in cpuset_update_flag() argument
2969 trialcs = dup_or_alloc_cpuset(cs); in cpuset_update_flag()
2978 err = validate_change(cs, trialcs); in cpuset_update_flag()
2982 balance_flag_changed = (is_sched_load_balance(cs) != in cpuset_update_flag()
2985 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in cpuset_update_flag()
2986 || (is_spread_page(cs) != is_spread_page(trialcs))); in cpuset_update_flag()
2989 cs->flags = trialcs->flags; in cpuset_update_flag()
3000 cpuset1_update_tasks_flags(cs); in cpuset_update_flag()
3014 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
3016 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
3017 struct cpuset *parent = parent_cs(cs); in update_prstate()
3027 if (new_prs && is_partition_invalid(cs)) in update_prstate()
3033 err = update_partition_exclusive_flag(cs, new_prs); in update_prstate()
3041 if (xcpus_empty(cs)) { in update_prstate()
3054 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) { in update_prstate()
3067 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask); in update_prstate()
3069 err = remote_partition_enable(cs, new_prs, &tmpmask); in update_prstate()
3077 !isolated_cpus_can_update(cs->effective_xcpus, NULL)) || in update_prstate()
3078 prstate_housekeeping_conflict(new_prs, cs->effective_xcpus)) in update_prstate()
3087 if (is_remote_partition(cs)) in update_prstate()
3088 remote_partition_disable(cs, &tmpmask); in update_prstate()
3090 update_parent_effective_cpumask(cs, partcmd_disable, in update_prstate()
3105 update_partition_exclusive_flag(cs, new_prs); in update_prstate()
3109 cs->partition_root_state = new_prs; in update_prstate()
3110 WRITE_ONCE(cs->prs_err, err); in update_prstate()
3111 if (!is_partition_valid(cs)) in update_prstate()
3112 reset_partition_data(cs); in update_prstate()
3114 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus); in update_prstate()
3119 update_cpumasks_hier(cs, &tmpmask, !new_prs); in update_prstate()
3123 && cpumask_empty(cs->effective_xcpus)); in update_prstate()
3126 update_partition_sd_lb(cs, old_prs); in update_prstate()
3128 notify_partition_change(cs, old_prs); in update_prstate()
3143 static int cpuset_can_attach_check(struct cpuset *cs) in cpuset_can_attach_check() argument
3145 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
3146 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
3151 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
3153 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
3154 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
3161 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
3169 cs = css_cs(css); in cpuset_can_attach()
3174 ret = cpuset_can_attach_check(cs); in cpuset_can_attach()
3178 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
3179 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
3198 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
3199 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
3203 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
3206 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
3207 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
3210 reset_migrate_dl_data(cs); in cpuset_can_attach()
3215 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
3217 reset_migrate_dl_data(cs); in cpuset_can_attach()
3227 cs->attach_in_progress++; in cpuset_can_attach()
3236 struct cpuset *cs; in cpuset_cancel_attach() local
3239 cs = css_cs(css); in cpuset_cancel_attach()
3242 dec_attach_in_progress_locked(cs); in cpuset_cancel_attach()
3244 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
3245 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
3247 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
3248 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
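
cpuset_can_attach() and cpuset_cancel_attach() above show a reserve-then-rollback pattern for deadline bandwidth: sum the bandwidth of the migrating DL tasks, reserve it on a destination CPU with dl_bw_alloc(), and release it with dl_bw_free() if the attach is cancelled. A standalone model of that accounting, using hypothetical *_model helpers rather than the kernel API:

	/* Standalone sketch, not kernel code. */
	#include <stdio.h>
	#include <errno.h>

	struct dest { unsigned long long capacity, reserved; };

	static int dl_bw_alloc_model(struct dest *d, unsigned long long bw)
	{
		if (d->reserved + bw > d->capacity)
			return -EBUSY;		/* not enough spare bandwidth */
		d->reserved += bw;
		return 0;
	}

	static void dl_bw_free_model(struct dest *d, unsigned long long bw)
	{
		d->reserved -= bw;
	}

	int main(void)
	{
		struct dest d = { .capacity = 100, .reserved = 70 };
		unsigned long long sum_migrate_dl_bw = 20;	/* summed during can_attach */

		if (dl_bw_alloc_model(&d, sum_migrate_dl_bw) == 0) {
			/* ... attach is later cancelled ... */
			dl_bw_free_model(&d, sum_migrate_dl_bw);
		}
		printf("reserved=%llu\n", d.reserved);		/* back to 70 */
		return 0;
	}
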
3262 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) in cpuset_attach_task() argument
3266 if (cs != &top_cpuset) in cpuset_attach_task()
3278 cpuset1_update_task_spread_flags(cs, task); in cpuset_attach_task()
3286 struct cpuset *cs; in cpuset_attach() local
3292 cs = css_cs(css); in cpuset_attach()
3296 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
3298 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
3307 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3311 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
3314 cpuset_attach_task(cs, task); in cpuset_attach()
3322 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
3323 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
3340 if (is_memory_migrate(cs)) { in cpuset_attach()
3352 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
3354 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
3355 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
3356 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
3357 reset_migrate_dl_data(cs); in cpuset_attach()
3360 dec_attach_in_progress_locked(cs); in cpuset_attach()
3371 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
3376 if (cs == &top_cpuset) in cpuset_write_resmask()
3381 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
3384 trialcs = dup_or_alloc_cpuset(cs); in cpuset_write_resmask()
3392 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3395 retval = update_exclusive_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
3398 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
3425 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
3433 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
3436 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
3439 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
3442 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
3445 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); in cpuset_common_seq_show()
3448 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); in cpuset_common_seq_show()
3466 struct cpuset *cs = css_cs(seq_css(seq)); in cpuset_partition_show() local
3469 switch (cs->partition_root_state) { in cpuset_partition_show()
3485 err = perr_strings[READ_ONCE(cs->prs_err)]; in cpuset_partition_show()
3498 struct cpuset *cs = css_cs(of_css(of)); in cpuset_partition_write() local
3514 if (is_cpuset_online(cs)) in cpuset_partition_write()
3515 retval = update_prstate(cs, val); in cpuset_partition_write()
3610 struct cpuset *cs; in cpuset_css_alloc() local
3615 cs = dup_or_alloc_cpuset(NULL); in cpuset_css_alloc()
3616 if (!cs) in cpuset_css_alloc()
3619 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3620 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3621 cs->relax_domain_level = -1; in cpuset_css_alloc()
3625 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3627 return &cs->css; in cpuset_css_alloc()
3632 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3633 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3642 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3644 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3649 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3655 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3656 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3686 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3687 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3688 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3689 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3705 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3708 if (!cpuset_v2() && is_sched_load_balance(cs)) in cpuset_css_offline()
3709 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3722 struct cpuset *cs = css_cs(css); in cpuset_css_killed() local
3726 if (is_partition_valid(cs)) in cpuset_css_killed()
3727 update_prstate(cs, PRS_MEMBER); in cpuset_css_killed()
3733 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3735 free_cpuset(cs); in cpuset_css_free()
3763 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork() local
3768 same_cs = (cs == task_cs(current)); in cpuset_can_fork()
3778 ret = cpuset_can_attach_check(cs); in cpuset_can_fork()
3794 cs->attach_in_progress++; in cpuset_can_fork()
3802 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork() local
3806 same_cs = (cs == task_cs(current)); in cpuset_cancel_fork()
3812 dec_attach_in_progress(cs); in cpuset_cancel_fork()
3822 struct cpuset *cs; in cpuset_fork() local
3826 cs = task_cs(task); in cpuset_fork()
3827 same_cs = (cs == task_cs(current)); in cpuset_fork()
3831 if (cs == &top_cpuset) in cpuset_fork()
3841 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_fork()
3842 cpuset_attach_task(cs, task); in cpuset_fork()
3844 dec_attach_in_progress_locked(cs); in cpuset_fork()
3906 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3911 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3912 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3914 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3917 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3918 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3922 cpuset_update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3924 cpuset_update_tasks_nodemask(cs); in hotplug_update_tasks()
3941 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3951 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3959 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3964 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3965 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3966 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3968 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3975 remote = is_remote_partition(cs); in cpuset_hotplug_update_tasks()
3976 if (remote || (is_partition_valid(cs) && is_partition_valid(parent))) in cpuset_hotplug_update_tasks()
3977 compute_partition_effective_cpumask(cs, &new_cpus); in cpuset_hotplug_update_tasks()
3980 partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3981 cs->prs_err = PERR_HOTPLUG; in cpuset_hotplug_update_tasks()
3982 remote_partition_disable(cs, tmp); in cpuset_hotplug_update_tasks()
3983 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3994 if (is_local_partition(cs) && (!is_partition_valid(parent) || in cpuset_hotplug_update_tasks()
3995 tasks_nocpu_error(parent, cs, &new_cpus))) in cpuset_hotplug_update_tasks()
4001 else if (is_partition_valid(parent) && is_partition_invalid(cs) && in cpuset_hotplug_update_tasks()
4002 !cpumask_empty(cs->effective_xcpus)) in cpuset_hotplug_update_tasks()
4006 update_parent_effective_cpumask(cs, partcmd, NULL, tmp); in cpuset_hotplug_update_tasks()
4007 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
4008 compute_partition_effective_cpumask(cs, &new_cpus); in cpuset_hotplug_update_tasks()
4014 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
4015 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
4023 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
4026 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
4117 struct cpuset *cs; in cpuset_handle_hotplug() local
4121 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_handle_hotplug()
4122 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_handle_hotplug()
4126 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_handle_hotplug()
4129 css_put(&cs->css); in cpuset_handle_hotplug()
4191 struct cpuset *cs; in __cpuset_cpus_allowed_locked() local
4193 cs = task_cs(tsk); in __cpuset_cpus_allowed_locked()
4194 if (cs != &top_cpuset) in __cpuset_cpus_allowed_locked()
4201 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { in __cpuset_cpus_allowed_locked()
4340 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
4342 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
4343 cs = parent_cs(cs); in nearest_hardwall_ancestor()
4344 return cs; in nearest_hardwall_ancestor()
4389 struct cpuset *cs; /* current cpuset ancestors */ in cpuset_current_node_allowed() local
4412 cs = nearest_hardwall_ancestor(task_cs(current)); in cpuset_current_node_allowed()
4413 allowed = node_isset(node, cs->mems_allowed); in cpuset_current_node_allowed()
4422 struct cpuset *cs; in cpuset_node_allowed() local
4449 cs = container_of(css, struct cpuset, css); in cpuset_node_allowed()
4450 allowed = node_isset(nid, cs->effective_mems); in cpuset_node_allowed()