// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;
	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
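
/*
 * Example (illustrative sketch, mirroring the for_each_cpu_wrap()
 * definition in <linux/cpumask.h>): visit every cpu in @mask exactly
 * once, starting at @start and wrapping past the highest cpu:
 *
 *	unsigned int cpu;
 *
 *	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, mask, start, true))
 *		do_something(cpu);	// do_something() is hypothetical
 */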

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
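
/*
 * Example (illustrative sketch): the usual allocate/use/free pattern.
 * With CONFIG_CPUMASK_OFFSTACK=n the allocation and free compile away
 * and the mask lives on the stack:
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, cpu_online_mask, some_mask);	// some_mask is hypothetical
 *	...
 *	free_cpumask_var(tmp);
 */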

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}
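
/*
 * Example (illustrative sketch, hypothetical early-boot code): allocate
 * a mask before the slab allocator is up; boot-time masks are never
 * freed:
 *
 *	static cpumask_var_t boot_mask;
 *
 *	void __init example_early_init(void)
 *	{
 *		alloc_bootmem_cpumask_var(&boot_mask);
 *		cpumask_copy(boot_mask, cpu_possible_mask);
 *	}
 */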

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA locality
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		cpu = cpumask_nth(i, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			return cpu;
	} else {
		/* NUMA first. */
		cpu = cpumask_nth_and(i, cpu_online_mask, cpumask_of_node(node));
		if (cpu < nr_cpu_ids)
			return cpu;

		i -= cpumask_weight_and(cpu_online_mask, cpumask_of_node(node));

		/* Skip NUMA nodes, done above. */
		cpu = cpumask_nth_andnot(i, cpu_online_mask, cpumask_of_node(node));
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
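
/*
 * Example (illustrative sketch, hypothetical driver code): spread queue
 * interrupt affinities across CPUs, preferring those near the device:
 *
 *	for (i = 0; i < nr_queues; i++)
 *		queue[i].cpu = cpumask_local_spread(i, dev_to_node(dev));
 */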

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu from *src1p & *src2p
 * @src1p: first cpumask pointer
 * @src2p: second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
					nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
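
/*
 * Example (illustrative sketch): pick a cpu allowed by both a task's
 * affinity mask and the online mask, rotating across candidates on
 * repeated calls:
 *
 *	cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *	if (cpu >= nr_cpu_ids)
 *		return -EAGAIN;
 */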

/**
 * cpumask_any_distribute - pick an arbitrary cpu from @srcp
 * @srcp: the cpumask pointer
 *
 * Iterated calls using the same srcp will be distributed within the mask.
 *
 * Returns >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
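
/*
 * Example (illustrative sketch): rotate work placement across all
 * online cpus rather than always choosing the first one:
 *
 *	cpu = cpumask_any_distribute(cpu_online_mask);
 */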
191