Lines Matching +full:in +full:- +full:masks

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2016-2017 Christoph Hellwig.
29 cpus_per_grp--; in grp_spread_init_one()
33 for (sibl = -1; cpus_per_grp > 0; ) { in grp_spread_init_one()
40 cpus_per_grp--; in grp_spread_init_one()
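
The lines above come from grp_spread_init_one(), the helper that fills a single group's cpumask: it assigns one CPU at a time and, as the 'sibl' loop variable suggests, keeps pulling CPUs from the same sibling set until cpus_per_grp reaches zero. A minimal userspace model of that counting pattern, with hypothetical names and plain int arrays standing in for cpumasks (the real helper operates on struct cpumask and the kernel's topology masks):

/*
 * Userspace model (not kernel code) of the counting pattern above: fill
 * one group with up to cpus_per_grp CPUs from a candidate set, pulling
 * in a chosen CPU's siblings first so hyperthread pairs stay together.
 * All names, the 8-CPU topology and the sibling table are illustrative.
 */
#include <stdio.h>

#define NCPUS 8

static void fill_one_group(const int sibling_id[NCPUS], int candidate[NCPUS],
                           int group[NCPUS], int cpus_per_grp)
{
        for (int cpu = 0; cpu < NCPUS && cpus_per_grp > 0; cpu++) {
                if (!candidate[cpu])
                        continue;
                candidate[cpu] = 0;             /* take the seed CPU */
                group[cpu] = 1;
                if (--cpus_per_grp == 0)
                        break;
                /* then take its remaining siblings, like the 'sibl' loop */
                for (int s = 0; s < NCPUS && cpus_per_grp > 0; s++) {
                        if (candidate[s] && sibling_id[s] == sibling_id[cpu]) {
                                candidate[s] = 0;
                                group[s] = 1;
                                cpus_per_grp--;
                        }
                }
        }
}

int main(void)
{
        /* CPUs 0/4, 1/5, 2/6, 3/7 are sibling pairs in this made-up box */
        int sibling_id[NCPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };
        int candidate[NCPUS]  = { 1, 1, 1, 1, 1, 1, 1, 1 };
        int group[NCPUS]      = { 0 };

        fill_one_group(sibling_id, candidate, group, 4);
        for (int cpu = 0; cpu < NCPUS; cpu++)
                if (group[cpu])
                        printf("cpu%d ", cpu);  /* prints: cpu0 cpu1 cpu4 cpu5 */
        printf("\n");
        return 0;
}
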
47 cpumask_var_t *masks; in alloc_node_to_cpumask() local
50 masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL); in alloc_node_to_cpumask()
51 if (!masks) in alloc_node_to_cpumask()
55 if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL)) in alloc_node_to_cpumask()
59 return masks; in alloc_node_to_cpumask()
62 while (--node >= 0) in alloc_node_to_cpumask()
63 free_cpumask_var(masks[node]); in alloc_node_to_cpumask()
64 kfree(masks); in alloc_node_to_cpumask()
68 static void free_node_to_cpumask(cpumask_var_t *masks) in free_node_to_cpumask() argument
73 free_cpumask_var(masks[node]); in free_node_to_cpumask()
74 kfree(masks); in free_node_to_cpumask()
77 static void build_node_to_cpumask(cpumask_var_t *masks) in build_node_to_cpumask() argument
82 cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); in build_node_to_cpumask()
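
Taken together, the three node_to_cpumask helpers follow a plain allocate/populate/unwind pattern: allocate one mask per NUMA node, undo the masks already allocated if a later allocation fails (the 'while (--node >= 0)' loop), and later set each CPU's bit in the mask of its home node via cpu_to_node(). A compact userspace sketch of the same unwind idiom, with illustrative names and calloc()/free() standing in for the kernel allocators:

/*
 * Userspace sketch of the allocate/unwind idiom used by
 * alloc_node_to_cpumask() and free_node_to_cpumask(); the struct and
 * function names here are illustrative, not the kernel's.
 */
#include <stdlib.h>

struct mask { unsigned long bits[4]; };

static struct mask **alloc_per_node_masks(int nr_nodes)
{
        struct mask **masks = calloc(nr_nodes, sizeof(*masks));
        int node;

        if (!masks)
                return NULL;

        for (node = 0; node < nr_nodes; node++) {
                masks[node] = calloc(1, sizeof(*masks[node]));
                if (!masks[node])
                        goto fail;
        }
        return masks;

fail:
        /* free exactly what was allocated, like 'while (--node >= 0)' above */
        while (--node >= 0)
                free(masks[node]);
        free(masks);
        return NULL;
}

static void free_per_node_masks(struct mask **masks, int nr_nodes)
{
        for (int node = 0; node < nr_nodes; node++)
                free(masks[node]);
        free(masks);
}

int main(void)
{
        struct mask **masks = alloc_per_node_masks(4);

        if (masks)
                free_per_node_masks(masks, 4);
        return 0;
}
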
90 /* Calculate the number of nodes in the supplied affinity mask */ in get_nodes_in_cpumask()
114 return ln->ncpus - rn->ncpus; in ncpus_cmp_func()
127 * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
163 * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is in alloc_nodes_groups()
168 * least one group, and the theory is simple: over-allocation in alloc_nodes_groups()
186 * grps(B) = G - grps(A) in alloc_nodes_groups()
189 * G = N - delta, and 0 <= delta <= N - 2 in alloc_nodes_groups()
202 * over-allocated, so grps(B) <= ncpu(B), in alloc_nodes_groups()
208 * round_down((N - delta) * ncpu(A) / N) = in alloc_nodes_groups()
209 * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >= in alloc_nodes_groups()
210 * round_down((N * ncpu(A) - delta * N) / N) = in alloc_nodes_groups()
211 * ncpu(A) - delta in alloc_nodes_groups()
215 * grps(A) - G >= ncpu(A) - delta - G in alloc_nodes_groups()
217 * G - grps(A) <= G + delta - ncpu(A) in alloc_nodes_groups()
219 * grps(B) <= N - ncpu(A) in alloc_nodes_groups()
225 * and we always re-calculate 'remaining_ncpus' & 'numgrps', and in alloc_nodes_groups()
244 remaining_ncpus -= ncpus; in alloc_nodes_groups()
245 numgrps -= ngroups; in alloc_nodes_groups()
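
The comment fragments above sketch the argument that alloc_nodes_groups() never gives a node more groups than it has CPUs: nodes are visited in ascending CPU count (see ncpus_cmp_func above), each node receives max(1, numgrps * ncpus / remaining_ncpus) groups, and both remaining_ncpus and numgrps are recomputed after every node. A small runnable model of that arithmetic, with made-up node sizes, showing the invariant grps(X) <= ncpu(X) in action:

/*
 * Userspace model of the per-node allocation arithmetic: each node gets
 * max(1, numgrps * ncpus / remaining_ncpus) groups, and the remaining
 * totals shrink after every node.  The node sizes are made up.
 */
#include <stdio.h>

int main(void)
{
        int ncpus[] = { 2, 4, 6 };      /* per-node CPU counts, sorted ascending */
        int nr_nodes = 3;
        int numgrps = 6;
        int remaining_ncpus = 2 + 4 + 6;

        for (int n = 0; n < nr_nodes; n++) {
                int ngroups = numgrps * ncpus[n] / remaining_ncpus;

                if (ngroups < 1)
                        ngroups = 1;    /* every active node gets at least one group */

                printf("node %d: %d CPUs -> %d group(s)\n", n, ncpus[n], ngroups);

                remaining_ncpus -= ncpus[n];
                numgrps -= ngroups;
        }
        /*
         * Output:
         *      node 0: 2 CPUs -> 1 group(s)
         *      node 1: 4 CPUs -> 2 group(s)
         *      node 2: 6 CPUs -> 3 group(s)
         * Every node stays within its CPU count and all 6 groups are used.
         */
        return 0;
}
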
252 struct cpumask *nmsk, struct cpumask *masks) in __group_cpus_evenly() argument
266 * If the number of nodes in the mask is greater than or equal to the in __group_cpus_evenly()
271 /* Ensure that only CPUs which are in both masks are set */ in __group_cpus_evenly()
273 cpumask_or(&masks[curgrp], &masks[curgrp], nmsk); in __group_cpus_evenly()
284 return -ENOMEM; in __group_cpus_evenly()
293 if (nv->ngroups == UINT_MAX) in __group_cpus_evenly()
296 /* Get the cpus on this node which are in the mask */ in __group_cpus_evenly()
297 cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]); in __group_cpus_evenly()
302 WARN_ON_ONCE(nv->ngroups > ncpus); in __group_cpus_evenly()
305 extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups); in __group_cpus_evenly()
308 for (v = 0; v < nv->ngroups; v++, curgrp++) { in __group_cpus_evenly()
309 cpus_per_grp = ncpus / nv->ngroups; in __group_cpus_evenly()
314 --extra_grps; in __group_cpus_evenly()
323 grp_spread_init_one(&masks[curgrp], nmsk, in __group_cpus_evenly()
326 done += nv->ngroups; in __group_cpus_evenly()
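
Within a single node, the matched lines show how that node's CPUs are split across its nv->ngroups groups: a base size of ncpus / ngroups, with the extra_grps = ncpus - ngroups * (ncpus / ngroups) leftover CPUs absorbed by giving the first few groups one CPU more. A short runnable illustration of the resulting group sizes (plain C, made-up counts):

/*
 * Userspace illustration of that split: ncpus CPUs into ngroups groups,
 * where extra_grps groups receive one extra CPU.  The counts are made up.
 */
#include <stdio.h>

int main(void)
{
        int ncpus = 10, ngroups = 4;
        int extra_grps = ncpus - ngroups * (ncpus / ngroups);   /* == ncpus % ngroups */

        for (int v = 0; v < ngroups; v++) {
                int cpus_per_grp = ncpus / ngroups;

                if (extra_grps) {       /* hand out the remainder one CPU at a time */
                        cpus_per_grp++;
                        --extra_grps;
                }
                printf("group %d: %d CPUs\n", v, cpus_per_grp);
        }
        /* Output: 3, 3, 2, 2 -- all 10 CPUs covered, sizes differ by at most one */
        return 0;
}
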
333 * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
340 * same group, and run two-stage grouping:
344 * We guarantee in the resulting grouping that all CPUs are covered, and
352 int ret = -ENOMEM; in group_cpus_evenly()
353 struct cpumask *masks = NULL; in group_cpus_evenly() local
365 masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); in group_cpus_evenly()
366 if (!masks) in group_cpus_evenly()
379 * CPU is handled in the 1st or 2nd stage, and either way is correct in group_cpus_evenly()
380 * from the API user's viewpoint since the 2-stage spread is a form of in group_cpus_evenly()
387 npresmsk, nmsk, masks); in group_cpus_evenly()
404 npresmsk, nmsk, masks); in group_cpus_evenly()
421 kfree(masks); in group_cpus_evenly()
424 return masks; in group_cpus_evenly()
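
From the caller's perspective, the matched lines show that group_cpus_evenly() returns a kcalloc()'d array of 'numgrps' struct cpumask entries on success (the !CONFIG_SMP fallback below returns the same kind of array with masks[0] set to cpu_possible_mask), so the caller only consumes the masks and then kfree()s the array. A minimal kernel-style usage sketch under that assumption; assign_queue_cpus() and the queue naming are hypothetical, not part of the kernel tree:

/*
 * Usage sketch (not from the kernel tree): spread 'numgrps' queues over
 * CPUs and print each group's mask.  It assumes only what is visible in
 * the listing above: group_cpus_evenly() takes the group count and
 * returns a kcalloc()'d array of cpumasks, or NULL on failure, which
 * the caller frees with kfree().
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int assign_queue_cpus(unsigned int numgrps)
{
        struct cpumask *masks = group_cpus_evenly(numgrps);
        unsigned int i;

        if (!masks)
                return -ENOMEM;

        for (i = 0; i < numgrps; i++)
                pr_info("queue %u -> CPUs %*pbl\n", i,
                        cpumask_pr_args(&masks[i]));

        kfree(masks);
        return 0;
}
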
429 struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); in group_cpus_evenly() local
431 if (!masks) in group_cpus_evenly()
435 cpumask_copy(&masks[0], cpu_possible_mask); in group_cpus_evenly()
436 return masks; in group_cpus_evenly()