xref: /linux/include/linux/cpumask.h (revision 1dd0dd0b1fefd1e51cfaddf62316f759fde7de7d)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_CPUMASK_H
3 #define __LINUX_CPUMASK_H
4 
5 /*
6  * Cpumasks provide a bitmap suitable for representing the
7  * set of CPUs in a system, one bit position per CPU number.  In general,
8  * only nr_cpu_ids (<= NR_CPUS) bits are valid.
9  */
10 #include <linux/kernel.h>
11 #include <linux/threads.h>
12 #include <linux/bitmap.h>
13 #include <linux/atomic.h>
14 #include <linux/bug.h>
15 #include <linux/gfp_types.h>
16 #include <linux/numa.h>
17 
18 /* Don't assign or return these: may not be this big! */
19 typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
20 
21 /**
22  * cpumask_bits - get the bits in a cpumask
23  * @maskp: the struct cpumask *
24  *
25  * You should only assume nr_cpu_ids bits of this mask are valid.  This is
26  * a macro so it's const-correct.
27  */
28 #define cpumask_bits(maskp) ((maskp)->bits)
29 
30 /**
31  * cpumask_pr_args - printf args to output a cpumask
32  * @maskp: cpumask to be printed
33  *
34  * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
35  */
36 #define cpumask_pr_args(maskp)		nr_cpu_ids, cpumask_bits(maskp)
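
/*
 * Illustrative example, not part of this header: cpumask_pr_args() expands to
 * the two arguments that the '%*pb' (hex) and '%*pbl' (list) printk format
 * specifiers expect.  The function name below is hypothetical.
 *
 *	static void example_report_online_cpus(void)
 *	{
 *		pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 *	}
 */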
37 
38 #if NR_CPUS == 1
39 #define nr_cpu_ids		1U
40 #else
41 extern unsigned int nr_cpu_ids;
42 #endif
43 
44 #ifdef CONFIG_CPUMASK_OFFSTACK
45 /* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
46  * not all bits may be allocated. */
47 #define nr_cpumask_bits	nr_cpu_ids
48 #else
49 #define nr_cpumask_bits	((unsigned int)NR_CPUS)
50 #endif
51 
52 /*
53  * The following particular system cpumasks and operations manage
54  * possible, present, active and online cpus.
55  *
56  *     cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
57  *     cpu_present_mask - has bit 'cpu' set iff cpu is populated
58  *     cpu_online_mask  - has bit 'cpu' set iff cpu available to scheduler
59  *     cpu_active_mask  - has bit 'cpu' set iff cpu available to migration
60  *
61  *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
62  *
63  *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
64  *  that might ever be plugged in at any time during the life of that
65  *  system boot.  The cpu_present_mask is dynamic(*),
66  *  representing which CPUs are currently plugged in.  And
67  *  cpu_online_mask is the dynamic subset of cpu_present_mask,
68  *  indicating those CPUs available for scheduling.
69  *
70  *  If HOTPLUG is enabled, then cpu_possible_mask is forced to have
71  *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
72  *  ACPI reports present at boot.
73  *
74  *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
75  *  depending on what ACPI reports as currently plugged in, otherwise
76  *  cpu_present_mask is just a copy of cpu_possible_mask.
77  *
78  *  (*) Well, cpu_present_mask is dynamic in the hotplug case.  If not
79  *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
80  *
81  * Subtleties:
82  * 1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
83  *    assumption that their single CPU is online.  The UP
84  *    cpu_{online,possible,present}_masks are placebos.  Changing them
85  *    will have no useful effect on the following num_*_cpus()
86  *    and cpu_*() macros in the UP case.  This ugliness is a UP
87  *    optimization - don't waste any instructions or memory references
88  *    asking if you're online or how many CPUs there are if there is
89  *    only one CPU.
90  */
91 
92 extern struct cpumask __cpu_possible_mask;
93 extern struct cpumask __cpu_online_mask;
94 extern struct cpumask __cpu_present_mask;
95 extern struct cpumask __cpu_active_mask;
96 extern struct cpumask __cpu_dying_mask;
97 #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
98 #define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
99 #define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
100 #define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
101 #define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)
102 
103 extern atomic_t __num_online_cpus;
104 
105 extern cpumask_t cpus_booted_once_mask;
106 
107 static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
108 {
109 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
110 	WARN_ON_ONCE(cpu >= bits);
111 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
112 }
113 
114 /* verify cpu argument to cpumask_* operators */
115 static __always_inline unsigned int cpumask_check(unsigned int cpu)
116 {
117 	cpu_max_bits_warn(cpu, nr_cpumask_bits);
118 	return cpu;
119 }
120 
121 /**
122  * cpumask_first - get the first cpu in a cpumask
123  * @srcp: the cpumask pointer
124  *
125  * Returns >= nr_cpu_ids if no cpus set.
126  */
127 static inline unsigned int cpumask_first(const struct cpumask *srcp)
128 {
129 	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
130 }
131 
132 /**
133  * cpumask_first_zero - get the first unset cpu in a cpumask
134  * @srcp: the cpumask pointer
135  *
136  * Returns >= nr_cpu_ids if all cpus are set.
137  */
138 static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
139 {
140 	return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
141 }
142 
143 /**
144  * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
145  * @src1p: the first input
146  * @src2p: the second input
147  *
148  * Returns >= nr_cpu_ids if no cpus set in both.  See also cpumask_next_and().
149  */
150 static inline
151 unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
152 {
153 	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
154 }
155 
156 /**
157  * cpumask_last - get the last CPU in a cpumask
158  * @srcp: the cpumask pointer
159  *
160  * Returns >= nr_cpumask_bits if no CPUs set.
161  */
162 static inline unsigned int cpumask_last(const struct cpumask *srcp)
163 {
164 	return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
165 }
166 
167 /**
168  * cpumask_next - get the next cpu in a cpumask
169  * @n: the cpu prior to the place to search (ie. return will be > @n)
170  * @srcp: the cpumask pointer
171  *
172  * Returns >= nr_cpu_ids if no further cpus set.
173  */
174 static inline
175 unsigned int cpumask_next(int n, const struct cpumask *srcp)
176 {
177 	/* -1 is a legal arg here. */
178 	if (n != -1)
179 		cpumask_check(n);
180 	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
181 }
182 
183 /**
184  * cpumask_next_zero - get the next unset cpu in a cpumask
185  * @n: the cpu prior to the place to search (ie. return will be > @n)
186  * @srcp: the cpumask pointer
187  *
188  * Returns >= nr_cpu_ids if no further cpus unset.
189  */
190 static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
191 {
192 	/* -1 is a legal arg here. */
193 	if (n != -1)
194 		cpumask_check(n);
195 	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
196 }
197 
198 #if NR_CPUS == 1
199 /* Uniprocessor: there is only one valid CPU */
200 static inline unsigned int cpumask_local_spread(unsigned int i, int node)
201 {
202 	return 0;
203 }
204 
205 static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
206 						      const struct cpumask *src2p)
207 {
208 	return cpumask_first_and(src1p, src2p);
209 }
210 
211 static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
212 {
213 	return cpumask_first(srcp);
214 }
215 #else
216 unsigned int cpumask_local_spread(unsigned int i, int node);
217 unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
218 			       const struct cpumask *src2p);
219 unsigned int cpumask_any_distribute(const struct cpumask *srcp);
220 #endif /* NR_CPUS */
221 
222 /**
223  * cpumask_next_and - get the next cpu in *src1p & *src2p
224  * @n: the cpu prior to the place to search (ie. return will be > @n)
225  * @src1p: the first cpumask pointer
226  * @src2p: the second cpumask pointer
227  *
228  * Returns >= nr_cpu_ids if no further cpus set in both.
229  */
230 static inline
231 unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
232 		     const struct cpumask *src2p)
233 {
234 	/* -1 is a legal arg here. */
235 	if (n != -1)
236 		cpumask_check(n);
237 	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
238 		nr_cpumask_bits, n + 1);
239 }
240 
241 /**
242  * for_each_cpu - iterate over every cpu in a mask
243  * @cpu: the (optionally unsigned) integer iterator
244  * @mask: the cpumask pointer
245  *
246  * After the loop, cpu is >= nr_cpu_ids.
247  */
248 #define for_each_cpu(cpu, mask)				\
249 	for ((cpu) = -1;				\
250 		(cpu) = cpumask_next((cpu), (mask)),	\
251 		(cpu) < nr_cpu_ids;)
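
/*
 * Illustrative sketch, not part of this header: counting the CPUs set in a
 * mask with for_each_cpu().  The helper name is hypothetical; cpumask_weight()
 * below does the same job directly.
 *
 *	static unsigned int example_count_cpus(const struct cpumask *mask)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		for_each_cpu(cpu, mask)
 *			n++;
 *		return n;
 *	}
 */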
252 
253 /**
254  * for_each_cpu_not - iterate over every cpu in a complemented mask
255  * @cpu: the (optionally unsigned) integer iterator
256  * @mask: the cpumask pointer
257  *
258  * After the loop, cpu is >= nr_cpu_ids.
259  */
260 #define for_each_cpu_not(cpu, mask)				\
261 	for ((cpu) = -1;					\
262 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
263 		(cpu) < nr_cpu_ids;)
264 
265 #if NR_CPUS == 1
266 static inline
267 unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
268 {
269 	cpumask_check(start);
270 	if (n != -1)
271 		cpumask_check(n);
272 
273 	/*
274 	 * Return the first available CPU when wrapping, or when starting before cpu0,
275 	 * since there is only one valid option.
276 	 */
277 	if (wrap && n >= 0)
278 		return nr_cpumask_bits;
279 
280 	return cpumask_first(mask);
281 }
282 #else
283 unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
284 #endif
285 
286 /**
287  * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
288  * @cpu: the (optionally unsigned) integer iterator
289  * @mask: the cpumask pointer
290  * @start: the start location
291  *
292  * The implementation does not assume any bit in @mask is set (including @start).
293  *
294  * After the loop, cpu is >= nr_cpu_ids.
295  */
296 #define for_each_cpu_wrap(cpu, mask, start)					\
297 	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
298 	     (cpu) < nr_cpumask_bits;						\
299 	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
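
/*
 * Illustrative sketch, not part of this header: using for_each_cpu_wrap() to
 * find the first set CPU at or after @start, wrapping around to the beginning
 * of @mask when needed.  The helper is hypothetical and returns >= nr_cpu_ids
 * when @mask is empty.
 *
 *	static unsigned int example_pick_cpu(const struct cpumask *mask,
 *					     unsigned int start)
 *	{
 *		unsigned int cpu;
 *
 *		for_each_cpu_wrap(cpu, mask, start)
 *			return cpu;
 *		return nr_cpu_ids;
 *	}
 */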
300 
301 /**
302  * for_each_cpu_and - iterate over every cpu in both masks
303  * @cpu: the (optionally unsigned) integer iterator
304  * @mask1: the first cpumask pointer
305  * @mask2: the second cpumask pointer
306  *
307  * This saves a temporary CPU mask in many places.  It is equivalent to:
308  *	struct cpumask tmp;
309  *	cpumask_and(&tmp, mask1, mask2);
310  *	for_each_cpu(cpu, &tmp)
311  *		...
312  *
313  * After the loop, cpu is >= nr_cpu_ids.
314  */
315 #define for_each_cpu_and(cpu, mask1, mask2)				\
316 	for ((cpu) = -1;						\
317 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
318 		(cpu) < nr_cpu_ids;)
319 
320 /**
321  * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
322  * @mask: the cpumask to search
323  * @cpu: the cpu to ignore.
324  *
325  * Often used to find any cpu but smp_processor_id() in a mask.
326  * Returns >= nr_cpu_ids if no cpus set.
327  */
328 static inline
329 unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
330 {
331 	unsigned int i;
332 
333 	cpumask_check(cpu);
334 	for_each_cpu(i, mask)
335 		if (i != cpu)
336 			break;
337 	return i;
338 }
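
/*
 * Illustrative sketch, not part of this header: picking a CPU from @mask other
 * than the current one, falling back to the current CPU when no other CPU is
 * set.  The helper is hypothetical and assumes the caller has preemption
 * disabled so that smp_processor_id() is stable.
 *
 *	static unsigned int example_pick_other_cpu(const struct cpumask *mask)
 *	{
 *		unsigned int this_cpu = smp_processor_id();
 *		unsigned int cpu = cpumask_any_but(mask, this_cpu);
 *
 *		return cpu < nr_cpu_ids ? cpu : this_cpu;
 *	}
 */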
339 
340 #define CPU_BITS_NONE						\
341 {								\
342 	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
343 }
344 
345 #define CPU_BITS_CPU0						\
346 {								\
347 	[0] =  1UL						\
348 }
349 
350 /**
351  * cpumask_set_cpu - set a cpu in a cpumask
352  * @cpu: cpu number (< nr_cpu_ids)
353  * @dstp: the cpumask pointer
354  */
355 static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
356 {
357 	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
358 }
359 
360 static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
361 {
362 	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
363 }
364 
365 
366 /**
367  * cpumask_clear_cpu - clear a cpu in a cpumask
368  * @cpu: cpu number (< nr_cpu_ids)
369  * @dstp: the cpumask pointer
370  */
371 static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
372 {
373 	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
374 }
375 
376 static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
377 {
378 	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
379 }
380 
381 /**
382  * cpumask_test_cpu - test for a cpu in a cpumask
383  * @cpu: cpu number (< nr_cpu_ids)
384  * @cpumask: the cpumask pointer
385  *
386  * Returns true if @cpu is set in @cpumask, else returns false
387  */
388 static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
389 {
390 	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
391 }
392 
393 /**
394  * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
395  * @cpu: cpu number (< nr_cpu_ids)
396  * @cpumask: the cpumask pointer
397  *
398  * Returns true if @cpu was set in the old bitmap of @cpumask, else returns false
399  *
400  * test_and_set_bit wrapper for cpumasks.
401  */
402 static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
403 {
404 	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
405 }
406 
407 /**
408  * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
409  * @cpu: cpu number (< nr_cpu_ids)
410  * @cpumask: the cpumask pointer
411  *
412  * Returns true if @cpu was set in the old bitmap of @cpumask, else returns false
413  *
414  * test_and_clear_bit wrapper for cpumasks.
415  */
416 static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
417 {
418 	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
419 }
420 
421 /**
422  * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
423  * @dstp: the cpumask pointer
424  */
425 static inline void cpumask_setall(struct cpumask *dstp)
426 {
427 	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
428 }
429 
430 /**
431  * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
432  * @dstp: the cpumask pointer
433  */
434 static inline void cpumask_clear(struct cpumask *dstp)
435 {
436 	bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
437 }
438 
439 /**
440  * cpumask_and - *dstp = *src1p & *src2p
441  * @dstp: the cpumask result
442  * @src1p: the first input
443  * @src2p: the second input
444  *
445  * If *@dstp is empty, returns false, else returns true
446  */
447 static inline bool cpumask_and(struct cpumask *dstp,
448 			       const struct cpumask *src1p,
449 			       const struct cpumask *src2p)
450 {
451 	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
452 				       cpumask_bits(src2p), nr_cpumask_bits);
453 }
454 
455 /**
456  * cpumask_or - *dstp = *src1p | *src2p
457  * @dstp: the cpumask result
458  * @src1p: the first input
459  * @src2p: the second input
460  */
461 static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
462 			      const struct cpumask *src2p)
463 {
464 	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
465 				      cpumask_bits(src2p), nr_cpumask_bits);
466 }
467 
468 /**
469  * cpumask_xor - *dstp = *src1p ^ *src2p
470  * @dstp: the cpumask result
471  * @src1p: the first input
472  * @src2p: the second input
473  */
474 static inline void cpumask_xor(struct cpumask *dstp,
475 			       const struct cpumask *src1p,
476 			       const struct cpumask *src2p)
477 {
478 	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
479 				       cpumask_bits(src2p), nr_cpumask_bits);
480 }
481 
482 /**
483  * cpumask_andnot - *dstp = *src1p & ~*src2p
484  * @dstp: the cpumask result
485  * @src1p: the first input
486  * @src2p: the second input
487  *
488  * If *@dstp is empty, returns false, else returns true
489  */
490 static inline bool cpumask_andnot(struct cpumask *dstp,
491 				  const struct cpumask *src1p,
492 				  const struct cpumask *src2p)
493 {
494 	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
495 					  cpumask_bits(src2p), nr_cpumask_bits);
496 }
497 
498 /**
499  * cpumask_complement - *dstp = ~*srcp
500  * @dstp: the cpumask result
501  * @srcp: the input to invert
502  */
503 static inline void cpumask_complement(struct cpumask *dstp,
504 				      const struct cpumask *srcp)
505 {
506 	bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
507 					      nr_cpumask_bits);
508 }
509 
510 /**
511  * cpumask_equal - *src1p == *src2p
512  * @src1p: the first input
513  * @src2p: the second input
514  */
515 static inline bool cpumask_equal(const struct cpumask *src1p,
516 				const struct cpumask *src2p)
517 {
518 	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
519 						 nr_cpumask_bits);
520 }
521 
522 /**
523  * cpumask_or_equal - *src1p | *src2p == *src3p
524  * @src1p: the first input
525  * @src2p: the second input
526  * @src3p: the third input
527  */
528 static inline bool cpumask_or_equal(const struct cpumask *src1p,
529 				    const struct cpumask *src2p,
530 				    const struct cpumask *src3p)
531 {
532 	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
533 			       cpumask_bits(src3p), nr_cpumask_bits);
534 }
535 
536 /**
537  * cpumask_intersects - (*src1p & *src2p) != 0
538  * @src1p: the first input
539  * @src2p: the second input
540  */
541 static inline bool cpumask_intersects(const struct cpumask *src1p,
542 				     const struct cpumask *src2p)
543 {
544 	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
545 						      nr_cpumask_bits);
546 }
547 
548 /**
549  * cpumask_subset - (*src1p & ~*src2p) == 0
550  * @src1p: the first input
551  * @src2p: the second input
552  *
553  * Returns true if *@src1p is a subset of *@src2p, else returns false
554  */
555 static inline bool cpumask_subset(const struct cpumask *src1p,
556 				 const struct cpumask *src2p)
557 {
558 	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
559 						  nr_cpumask_bits);
560 }
561 
562 /**
563  * cpumask_empty - *srcp == 0
564  * @srcp: the cpumask to check for all cpus (< nr_cpu_ids) being clear.
565  */
566 static inline bool cpumask_empty(const struct cpumask *srcp)
567 {
568 	return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
569 }
570 
571 /**
572  * cpumask_full - *srcp == 0xFFFFFFFF...
573  * @srcp: the cpumask to check for all cpus (< nr_cpu_ids) being set.
574  */
575 static inline bool cpumask_full(const struct cpumask *srcp)
576 {
577 	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
578 }
579 
580 /**
581  * cpumask_weight - Count of bits in *srcp
582  * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
583  */
584 static inline unsigned int cpumask_weight(const struct cpumask *srcp)
585 {
586 	return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
587 }
588 
589 /**
590  * cpumask_shift_right - *dstp = *srcp >> n
591  * @dstp: the cpumask result
592  * @srcp: the input to shift
593  * @n: the number of bits to shift by
594  */
595 static inline void cpumask_shift_right(struct cpumask *dstp,
596 				       const struct cpumask *srcp, int n)
597 {
598 	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
599 					       nr_cpumask_bits);
600 }
601 
602 /**
603  * cpumask_shift_left - *dstp = *srcp << n
604  * @dstp: the cpumask result
605  * @srcp: the input to shift
606  * @n: the number of bits to shift by
607  */
608 static inline void cpumask_shift_left(struct cpumask *dstp,
609 				      const struct cpumask *srcp, int n)
610 {
611 	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
612 					      nr_cpumask_bits);
613 }
614 
615 /**
616  * cpumask_copy - *dstp = *srcp
617  * @dstp: the result
618  * @srcp: the input cpumask
619  */
620 static inline void cpumask_copy(struct cpumask *dstp,
621 				const struct cpumask *srcp)
622 {
623 	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
624 }
625 
626 /**
627  * cpumask_any - pick a "random" cpu from *srcp
628  * @srcp: the input cpumask
629  *
630  * Returns >= nr_cpu_ids if no cpus set.
631  */
632 #define cpumask_any(srcp) cpumask_first(srcp)
633 
634 /**
635  * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
636  * @mask1: the first input cpumask
637  * @mask2: the second input cpumask
638  *
639  * Returns >= nr_cpu_ids if no cpus set.
640  */
641 #define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
642 
643 /**
644  * cpumask_of - the cpumask containing just a given cpu
645  * @cpu: the cpu (< nr_cpu_ids)
646  */
647 #define cpumask_of(cpu) (get_cpu_mask(cpu))
648 
649 /**
650  * cpumask_parse_user - extract a cpumask from a user string
651  * @buf: the buffer to extract from
652  * @len: the length of the buffer
653  * @dstp: the cpumask to set.
654  *
655  * Returns -errno, or 0 for success.
656  */
657 static inline int cpumask_parse_user(const char __user *buf, int len,
658 				     struct cpumask *dstp)
659 {
660 	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
661 }
662 
663 /**
664  * cpumask_parselist_user - extract a cpumask from a user string
665  * @buf: the buffer to extract from
666  * @len: the length of the buffer
667  * @dstp: the cpumask to set.
668  *
669  * Returns -errno, or 0 for success.
670  */
671 static inline int cpumask_parselist_user(const char __user *buf, int len,
672 				     struct cpumask *dstp)
673 {
674 	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
675 				     nr_cpumask_bits);
676 }
677 
678 /**
679  * cpumask_parse - extract a cpumask from a string
680  * @buf: the buffer to extract from
681  * @dstp: the cpumask to set.
682  *
683  * Returns -errno, or 0 for success.
684  */
685 static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
686 {
687 	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
688 }
689 
690 /**
691  * cpulist_parse - extract a cpumask from a string of ranges
692  * @buf: the buffer to extract from
693  * @dstp: the cpumask to set.
694  *
695  * Returns -errno, or 0 for success.
696  */
697 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
698 {
699 	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
700 }
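
/*
 * Illustrative sketch, not part of this header: parsing a CPU range string
 * such as "0-3,8", e.g. one passed in via a module parameter.  cpumask_var_t
 * and its allocators are described further below.
 *
 *	cpumask_var_t mask;
 *	int err;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	err = cpulist_parse("0-3,8", mask);
 *	if (err)
 *		pr_warn("invalid CPU list: %d\n", err);
 *	free_cpumask_var(mask);
 */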
701 
702 /**
703  * cpumask_size - size to allocate for a 'struct cpumask' in bytes
704  */
705 static inline unsigned int cpumask_size(void)
706 {
707 	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
708 }
709 
710 /*
711  * cpumask_var_t: struct cpumask for stack usage.
712  *
713  * Oh, the wicked games we play!  In order to make kernel coding a
714  * little more difficult, we typedef cpumask_var_t to an array or a
715  * pointer: doing &mask on an array is a noop, so it still works.
716  *
717  * ie.
718  *	cpumask_var_t tmpmask;
719  *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
720  *		return -ENOMEM;
721  *
722  *	  ... use 'tmpmask' like a normal struct cpumask * ...
723  *
724  *	free_cpumask_var(tmpmask);
725  *
726  *
727  * However, there is one notable exception: alloc_cpumask_var() allocates
728  * only nr_cpumask_bits bits (on the other hand, a real cpumask_t always has
729  * NR_CPUS bits). Therefore you must not dereference a cpumask_var_t:
730  *
731  *	cpumask_var_t tmpmask;
732  *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
733  *		return -ENOMEM;
734  *
735  *	var = *tmpmask;
736  *
737  * The code above does an NR_CPUS-sized memcpy and can corrupt memory.
738  * cpumask_copy() provides safe copy functionality instead.
739  *
740  * Note that there is another evil here: if you define a cpumask_var_t
741  * as a percpu variable, then the way to obtain the address of the cpumask
742  * structure depends on the configuration, and so does the this_cpu_*
743  * operation that must be used. Please use this_cpu_cpumask_var_ptr() in
744  * those cases. The direct use of this_cpu_ptr() or this_cpu_read() will
745  * lead to failures when the other cpumask_var_t implementation is configured.
746  *
747  * Please also note that __cpumask_var_read_mostly can be used to declare
748  * a cpumask_var_t variable itself (not its content) as read mostly.
749  */
750 #ifdef CONFIG_CPUMASK_OFFSTACK
751 typedef struct cpumask *cpumask_var_t;
752 
753 #define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
754 #define __cpumask_var_read_mostly	__read_mostly
755 
756 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
757 
758 static inline
759 bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
760 {
761 	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
762 }
763 
764 /**
765  * alloc_cpumask_var - allocate a struct cpumask
766  * @mask: pointer to cpumask_var_t where the cpumask is returned
767  * @flags: GFP_ flags
768  *
769  * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
770  * a nop that returns a constant 1 (in <linux/cpumask.h>).
771  *
772  * See alloc_cpumask_var_node.
773  */
774 static inline
775 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
776 {
777 	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
778 }
779 
780 static inline
781 bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
782 {
783 	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
784 }
785 
786 void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
787 void free_cpumask_var(cpumask_var_t mask);
788 void free_bootmem_cpumask_var(cpumask_var_t mask);
789 
790 static inline bool cpumask_available(cpumask_var_t mask)
791 {
792 	return mask != NULL;
793 }
794 
795 #else
796 typedef struct cpumask cpumask_var_t[1];
797 
798 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
799 #define __cpumask_var_read_mostly
800 
801 static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
802 {
803 	return true;
804 }
805 
806 static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
807 					  int node)
808 {
809 	return true;
810 }
811 
812 static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
813 {
814 	cpumask_clear(*mask);
815 	return true;
816 }
817 
818 static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
819 					  int node)
820 {
821 	cpumask_clear(*mask);
822 	return true;
823 }
824 
825 static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
826 {
827 }
828 
829 static inline void free_cpumask_var(cpumask_var_t mask)
830 {
831 }
832 
833 static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
834 {
835 }
836 
837 static inline bool cpumask_available(cpumask_var_t mask)
838 {
839 	return true;
840 }
841 #endif /* CONFIG_CPUMASK_OFFSTACK */
842 
843 /* It's common to want to use cpu_all_mask in struct member initializers,
844  * so it has to refer to an address rather than a pointer. */
845 extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
846 #define cpu_all_mask to_cpumask(cpu_all_bits)
847 
848 /* The first row of cpu_bit_bitmap is all zeroes and serves as the empty mask. */
849 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
850 
851 #if NR_CPUS == 1
852 /* Uniprocessor: the possible/online/present masks are always "1" */
853 #define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
854 #define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
855 #define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
856 #else
857 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
858 #define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
859 #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
860 #endif
861 
862 /* Wrappers for arch boot code to manipulate normally-constant masks */
863 void init_cpu_present(const struct cpumask *src);
864 void init_cpu_possible(const struct cpumask *src);
865 void init_cpu_online(const struct cpumask *src);
866 
867 static inline void reset_cpu_possible_mask(void)
868 {
869 	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
870 }
871 
872 static inline void
873 set_cpu_possible(unsigned int cpu, bool possible)
874 {
875 	if (possible)
876 		cpumask_set_cpu(cpu, &__cpu_possible_mask);
877 	else
878 		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
879 }
880 
881 static inline void
882 set_cpu_present(unsigned int cpu, bool present)
883 {
884 	if (present)
885 		cpumask_set_cpu(cpu, &__cpu_present_mask);
886 	else
887 		cpumask_clear_cpu(cpu, &__cpu_present_mask);
888 }
889 
890 void set_cpu_online(unsigned int cpu, bool online);
891 
892 static inline void
893 set_cpu_active(unsigned int cpu, bool active)
894 {
895 	if (active)
896 		cpumask_set_cpu(cpu, &__cpu_active_mask);
897 	else
898 		cpumask_clear_cpu(cpu, &__cpu_active_mask);
899 }
900 
901 static inline void
902 set_cpu_dying(unsigned int cpu, bool dying)
903 {
904 	if (dying)
905 		cpumask_set_cpu(cpu, &__cpu_dying_mask);
906 	else
907 		cpumask_clear_cpu(cpu, &__cpu_dying_mask);
908 }
909 
910 /**
911  * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
912  * @bitmap: the bitmap
913  *
914  * There are a few places where cpumask_var_t isn't appropriate and
915  * static cpumasks must be used (eg. very early boot), yet we don't
916  * expose the definition of 'struct cpumask'.
917  *
918  * This does the conversion, and can be used as a constant initializer.
919  */
920 #define to_cpumask(bitmap)						\
921 	((struct cpumask *)(1 ? (bitmap)				\
922 			    : (void *)sizeof(__check_is_bitmap(bitmap))))
923 
924 static inline int __check_is_bitmap(const unsigned long *bitmap)
925 {
926 	return 1;
927 }
928 
929 /*
930  * Special-case data structure for "single bit set only" constant CPU masks.
931  *
932  * We pre-generate all the 64 (or 32) possible bit positions, with enough
933  * padding to the left and the right, and return the constant pointer
934  * appropriately offset.
935  */
936 extern const unsigned long
937 	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
938 
939 static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
940 {
941 	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
942 	p -= cpu / BITS_PER_LONG;
943 	return to_cpumask(p);
944 }
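
/*
 * Worked example of the offsetting above, assuming BITS_PER_LONG == 64: for
 * cpu == 70, cpu % 64 == 6, so p first points at the row whose word 0 has only
 * bit 6 set.  Subtracting cpu / 64 == 1 word shifts the view back by one word,
 * so that same set bit is now read as bit 6 of word 1, i.e. bit 70 of the
 * returned constant cpumask.  The all-zero padding in cpu_bit_bitmap keeps
 * this pointer arithmetic within the array.
 */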
945 
946 #if NR_CPUS > 1
947 /**
948  * num_online_cpus() - Read the number of online CPUs
949  *
950  * Despite the fact that __num_online_cpus is of type atomic_t, this
951  * interface gives only a momentary snapshot and is not protected against
952  * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
953  * region.
954  */
955 static inline unsigned int num_online_cpus(void)
956 {
957 	return atomic_read(&__num_online_cpus);
958 }
959 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
960 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
961 #define num_active_cpus()	cpumask_weight(cpu_active_mask)
962 
963 static inline bool cpu_online(unsigned int cpu)
964 {
965 	return cpumask_test_cpu(cpu, cpu_online_mask);
966 }
967 
968 static inline bool cpu_possible(unsigned int cpu)
969 {
970 	return cpumask_test_cpu(cpu, cpu_possible_mask);
971 }
972 
973 static inline bool cpu_present(unsigned int cpu)
974 {
975 	return cpumask_test_cpu(cpu, cpu_present_mask);
976 }
977 
978 static inline bool cpu_active(unsigned int cpu)
979 {
980 	return cpumask_test_cpu(cpu, cpu_active_mask);
981 }
982 
983 static inline bool cpu_dying(unsigned int cpu)
984 {
985 	return cpumask_test_cpu(cpu, cpu_dying_mask);
986 }
987 
988 #else
989 
990 #define num_online_cpus()	1U
991 #define num_possible_cpus()	1U
992 #define num_present_cpus()	1U
993 #define num_active_cpus()	1U
994 
995 static inline bool cpu_online(unsigned int cpu)
996 {
997 	return cpu == 0;
998 }
999 
1000 static inline bool cpu_possible(unsigned int cpu)
1001 {
1002 	return cpu == 0;
1003 }
1004 
1005 static inline bool cpu_present(unsigned int cpu)
1006 {
1007 	return cpu == 0;
1008 }
1009 
1010 static inline bool cpu_active(unsigned int cpu)
1011 {
1012 	return cpu == 0;
1013 }
1014 
1015 static inline bool cpu_dying(unsigned int cpu)
1016 {
1017 	return false;
1018 }
1019 
1020 #endif /* NR_CPUS > 1 */
1021 
1022 #define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))
1023 
1024 #if NR_CPUS <= BITS_PER_LONG
1025 #define CPU_BITS_ALL						\
1026 {								\
1027 	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
1028 }
1029 
1030 #else /* NR_CPUS > BITS_PER_LONG */
1031 
1032 #define CPU_BITS_ALL						\
1033 {								\
1034 	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
1035 	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
1036 }
1037 #endif /* NR_CPUS > BITS_PER_LONG */
1038 
1039 /**
1040  * cpumap_print_to_pagebuf  - copies the cpumask into the buffer either
1041  *	as comma-separated list of cpus or hex values of cpumask
1042  * @list: true to print the cpumask as a comma-separated list, false for hex values
1043  * @mask: the cpumask to copy
1044  * @buf: the buffer to copy into
1045  *
1046  * Returns the length of the (null-terminated) @buf string, zero if
1047  * nothing is copied.
1048  */
1049 static inline ssize_t
1050 cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
1051 {
1052 	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
1053 				      nr_cpu_ids);
1054 }
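
/*
 * Illustrative sketch, not part of this header: a typical use is a sysfs
 * show() callback exporting a mask as a CPU list.  The attribute and function
 * names are hypothetical.
 *
 *	static ssize_t example_cpus_show(struct device *dev,
 *					 struct device_attribute *attr, char *buf)
 *	{
 *		return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
 *	}
 */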
1055 
1056 /**
1057  * cpumap_print_bitmask_to_buf  - copies the cpumask into the buffer as
1058  *	hex values of cpumask
1059  *
1060  * @buf: the buffer to copy into
1061  * @mask: the cpumask to copy
1062  * @off: offset in the printed cpumask string from which copying into @buf starts
1063  * @count: the maximum number of bytes to print
1064  *
1065  * The function prints the cpumask into the buffer as hex values of
1066  * cpumask; Typically used by bin_attribute to export cpumask bitmask
1067  * ABI.
1068  *
1069  * Returns the number of bytes copied into @buf, excluding the
1070  * terminating '\0'.
1071  */
1072 static inline ssize_t
1073 cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
1074 		loff_t off, size_t count)
1075 {
1076 	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
1077 				   nr_cpu_ids, off, count) - 1;
1078 }
1079 
1080 /**
1081  * cpumap_print_list_to_buf  - copies the cpumask into the buffer as
1082  *	comma-separated list of cpus
1083  *
1084  * Everything is the same as with cpumap_print_bitmask_to_buf() above,
1085  * except the print format.
1086  */
1087 static inline ssize_t
1088 cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
1089 		loff_t off, size_t count)
1090 {
1091 	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
1092 				   nr_cpu_ids, off, count) - 1;
1093 }
1094 
1095 #if NR_CPUS <= BITS_PER_LONG
1096 #define CPU_MASK_ALL							\
1097 (cpumask_t) { {								\
1098 	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
1099 } }
1100 #else
1101 #define CPU_MASK_ALL							\
1102 (cpumask_t) { {								\
1103 	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
1104 	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
1105 } }
1106 #endif /* NR_CPUS > BITS_PER_LONG */
1107 
1108 #define CPU_MASK_NONE							\
1109 (cpumask_t) { {								\
1110 	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
1111 } }
1112 
1113 #define CPU_MASK_CPU0							\
1114 (cpumask_t) { {								\
1115 	[0] =  1UL							\
1116 } }
1117 
1118 /*
1119  * Provide a valid theoretical max size for cpumap and cpulist sysfs files
1120  * to avoid breaking userspace which may allocate a buffer based on the size
1121  * reported by e.g. fstat.
1122  *
1123  * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length: each group of
 * 32 CPUs prints as 8 hex characters plus a ',' separator, and the final
 * group has no trailing separator.
1124  *
1125  * For cpulist 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up
1126  * to 2 orders of magnitude larger than 8192. And then we divide by 2 to
1127  * cover a worst-case of every other cpu being on one of two nodes for a
1128  * very large NR_CPUS.
1129  *
1130  *  Use PAGE_SIZE as a minimum for smaller configurations.
1131  */
1132 #define CPUMAP_FILE_MAX_BYTES  ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
1133 					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
1134 #define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
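
/*
 * Worked example of the sizing above, assuming NR_CPUS == 8192 and
 * PAGE_SIZE == 4096: the cpumap form needs 8192 * 9/32 - 1 == 2303 bytes,
 * which is below PAGE_SIZE, so CPUMAP_FILE_MAX_BYTES is 4096.  The cpulist
 * form allows 8192 * 7/2 == 28672 bytes, so CPULIST_FILE_MAX_BYTES is 28672.
 */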
1135 
1136 #endif /* __LINUX_CPUMASK_H */
1137