xref: /linux/lib/cpumask.c (revision bc93e19d088bb14e116756ab270deea6ee62d782)
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
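
/*
 * Usage sketch (illustrative only, not part of the original file):
 * cpumask_next_wrap() is normally used via for_each_cpu_wrap(), which
 * visits every bit in the mask exactly once starting from an arbitrary
 * offset. The start value below is just an example; any valid CPU
 * number would do.
 */
static void __maybe_unused example_walk_online_wrapped(void)
{
	unsigned int start = cpumask_first(cpu_online_mask);
	unsigned int cpu;

	/* Visits each online CPU once, wrapping past the end of the mask. */
	for_each_cpu_wrap(cpu, cpu_online_mask, start)
		pr_info("visiting cpu %u\n", cpu);
}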

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so it does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
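
/*
 * Usage sketch (illustrative only): the usual allocate/use/free pattern.
 * With CONFIG_CPUMASK_OFFSTACK=n the cpumask lives on the stack and the
 * "allocation" cannot fail, but callers must still check the result so
 * the same code works in both configurations.
 */
static int __maybe_unused example_alloc_pattern(void)
{
	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);
	/* ... operate on tmp ... */
	free_cpumask_var(tmp);
	return 0;
}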

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}
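
/*
 * Usage sketch (illustrative only): early-boot code cannot use kmalloc(),
 * so it takes its cpumask from memblock. The mask is either consumed for
 * the lifetime of the system or handed back with
 * free_bootmem_cpumask_var() once setup is done.
 */
static void __init __maybe_unused example_bootmem_pattern(void)
{
	cpumask_var_t boot_mask;

	alloc_bootmem_cpumask_var(&boot_mask);	/* panics on failure */
	cpumask_copy(boot_mask, cpu_possible_mask);
	/* ... use boot_mask during early initialization ... */
	free_bootmem_cpumask_var(boot_mask);
}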

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local NUMA cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
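
/*
 * Usage sketch (illustrative only): a typical caller is a multi-queue
 * driver spreading its queues over CPUs near the device. dev_node and
 * nr_queues are hypothetical placeholders for the device's NUMA node
 * and queue count.
 */
static void __maybe_unused example_spread_queues(int dev_node, unsigned int nr_queues)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++)
		pr_info("queue %u -> cpu %u\n", q,
			cpumask_local_spread(q, dev_node));
}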

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu within *src1p & *src2p
 * @src1p: first cpumask pointer
 * @src2p: second cpumask pointer
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
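
/*
 * Usage sketch (illustrative only): pick a CPU from the intersection of
 * a caller-supplied mask and the online mask. Unlike cpumask_any_and(),
 * repeated calls rotate through the eligible CPUs instead of always
 * returning the first set bit.
 */
static unsigned int __maybe_unused example_pick_cpu(const struct cpumask *allowed)
{
	unsigned int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

	/* Empty intersection: fall back to any online CPU. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);
	return cpu;
}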

/**
 * cpumask_any_distribute - pick an arbitrary cpu from *srcp
 * @srcp: the cpumask pointer
 *
 * Iterated calls using the same @srcp will be distributed within the mask.
 *
 * Returns >= nr_cpu_ids if the mask is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
199