xref: /freebsd/sys/compat/linuxkpi/common/include/linux/bitops.h (revision 34892a8e30055000352d9612ad985be550c82bea)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_BITOPS_H_
#define	_LINUXKPI_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#define	BITS_PER_LONG		(__SIZEOF_LONG__ * __CHAR_BIT__)
#define	BITS_PER_LONG_LONG	(__SIZEOF_LONG_LONG__ * __CHAR_BIT__)

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8
#define	BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)
#define	BITS_TO_BYTES(n)	howmany((n), BITS_PER_BYTE)
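
/*
 * Illustrative expansions (assumes BITS_PER_LONG == 64; these examples are
 * not part of the original header):
 *
 *	BIT(3)			== 0x08UL
 *	GENMASK(15, 8)		== 0xff00UL	(bits h..l, inclusive)
 *	GENMASK_ULL(39, 32)	== 0xff00000000ULL
 *	BITS_TO_LONGS(100)	== 2		(words needed for 100 bits)
 *	BIT_WORD(70)		== 1, BIT_MASK(70) == BIT(6)
 *
 * GENMASK() requires l <= h < BITS_PER_LONG; out-of-range arguments shift
 * by the type width or more, which is undefined behavior.
 */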

#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
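
/*
 * Illustrative population-count values (not part of the original header):
 *
 *	hweight8(0xf0)		== 4
 *	hweight32(0x0f0f0f0f)	== 16
 *	hweight64(~0ULL)	== 64
 */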

#if __has_builtin(__builtin_popcountg)
#define	HWEIGHT8(x)	(__builtin_popcountg((uint8_t)(x)))
#define	HWEIGHT16(x)	(__builtin_popcountg((uint16_t)(x)))
#define	HWEIGHT32(x)	(__builtin_popcountg((uint32_t)(x)))
#define	HWEIGHT64(x)	(__builtin_popcountg((uint64_t)(x)))
#else
/* Fallback for compilers without __builtin_popcountg (LLVM before 19, GCC before 14). */
#define	HWEIGHT8(x)	(__const_bitcount8((uint8_t)(x)))
#define	HWEIGHT16(x)	(__const_bitcount16((uint16_t)(x)))
#define	HWEIGHT32(x)	(__const_bitcount32((uint32_t)(x)))
#define	HWEIGHT64(x)	(__const_bitcount64((uint64_t)(x)))
#endif
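
/*
 * Unlike the lower-case hweight*() wrappers, both expansions above yield
 * integer constant expressions for constant arguments, so the upper-case
 * forms can be used where a compile-time constant is required.
 * Illustrative (hypothetical) use, not part of the original header:
 *
 *	enum { MY_NFLAGS = HWEIGHT32(0x0000f00fu) };	// == 8
 */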

static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline unsigned long
__ffs64(uint64_t mask)
{
	return (ffsll(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}
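
/*
 * Indexing conventions (summary, not part of the original header): the
 * BSD ffs()/fls() family is 1-based and returns 0 for a zero argument,
 * so the Linux-style 0-based wrappers above return -1 (or, for
 * __ffs64(), (unsigned long)-1) when given 0.  Linux callers are
 * expected not to pass 0.  Illustrative values:
 *
 *	__ffs(0x10)	== 4		ffs(0x10)   == 5
 *	__fls(0x10)	== 4		fls64(0x10) == 5
 */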

static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	return ((word >> shift) | (word << (32 - shift)));
}
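
/*
 * Caveat (not part of the original header): as written, ror32() requires
 * 1 <= shift <= 31; shift == 0 (or >= 32) makes "word << (32 - shift)" a
 * shift by the full type width, which is undefined behavior in C.
 * Illustrative value: ror32(0x80000001, 1) == 0xc0000000.
 */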

#define	ffz(mask)	__ffs(~(mask))
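
/*
 * ffz() returns the 0-based index of the first zero bit, e.g.
 * ffz(0x0f) == 4 (illustrative; undefined when all bits are set).
 */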

static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return (order);
}
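
/*
 * get_count_order() returns the smallest order such that
 * (1 << order) >= count, for count >= 1 (illustrative, not part of the
 * original header):
 *
 *	get_count_order(1)  == 0
 *	get_count_order(16) == 4
 *	get_count_order(17) == 5
 */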

static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
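
/*
 * Illustrative usage (assumes BITS_PER_LONG == 64; not part of the
 * original header):
 *
 *	unsigned long bm[BITS_TO_LONGS(128)] = {};
 *
 *	set_bit(70, bm);
 *	find_first_bit(bm, 128);	// == 70
 *	find_first_bit(bm, 64);		// == 64: nothing found in the
 *					//    first 64 bits, so "size"
 *					//    is returned
 */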

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}
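
/*
 * All of the find_* helpers share one convention: when no matching bit
 * exists in [0, size), they return "size" itself.  Illustrative values
 * for the bitmap from the example above (bit 70 set, 128 bits):
 *
 *	find_last_bit(bm, 128)	== 70
 *	find_last_bit(bm, 64)	== 64	(no set bit in the scanned range)
 */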

static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
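
/*
 * The "next" variants resume a scan at "offset" (inclusive).  Illustrative
 * values for a 128-bit map with bits 5 and 70 set (not part of the
 * original header):
 *
 *	find_next_bit(bm, 128, 0)	== 5
 *	find_next_bit(bm, 128, 6)	== 70
 *	find_next_bit(bm, 128, 71)	== 128	(no further set bit)
 */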

#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit_unlock(i, a)						\
    atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
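
/*
 * Note (not part of the original header): in Linux the __-prefixed forms
 * are the non-atomic variants, but this KPI maps both spellings to the
 * same FreeBSD atomic(9) primitives, so __set_bit()/__clear_bit() are
 * atomic here as well.  clear_bit_unlock() additionally has release
 * semantics via atomic_clear_rel_long().  Illustrative usage:
 *
 *	unsigned long flags[BITS_TO_LONGS(32)] = {};
 *
 *	set_bit(0, flags);
 *	if (test_bit(0, flags))
 *		clear_bit(0, flags);
 */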

static inline void
__assign_bit(long bit, volatile unsigned long *addr, bool value)
{
	if (value)
		__set_bit(bit, addr);
	else
		__clear_bit(bit, addr);
}

static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}

static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}

static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}
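
/*
 * The atomic test-and-* variants loop on atomic_fcmpset_long() until the
 * compare-and-set succeeds; all four return the bit's previous value.
 * The __-prefixed forms read and write the word non-atomically and are
 * only safe under external serialization.  Illustrative usage:
 *
 *	if (test_and_set_bit(3, flags) == 0) {
 *		// we transitioned bit 3 from 0 to 1 and own the resource
 *	}
 */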
362 
363 enum {
364         REG_OP_ISFREE,
365         REG_OP_ALLOC,
366         REG_OP_RELEASE,
367 };
368 
369 static inline int
linux_reg_op(unsigned long * bitmap,int pos,int order,int reg_op)370 linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
371 {
372         int nbits_reg;
373         int index;
374         int offset;
375         int nlongs_reg;
376         int nbitsinlong;
377         unsigned long mask;
378         int i;
379         int ret = 0;
380 
381         nbits_reg = 1 << order;
382         index = pos / BITS_PER_LONG;
383         offset = pos - (index * BITS_PER_LONG);
384         nlongs_reg = BITS_TO_LONGS(nbits_reg);
385         nbitsinlong = MIN(nbits_reg,  BITS_PER_LONG);
386 
387         mask = (1UL << (nbitsinlong - 1));
388         mask += mask - 1;
389         mask <<= offset;
390 
391         switch (reg_op) {
392         case REG_OP_ISFREE:
393                 for (i = 0; i < nlongs_reg; i++) {
394                         if (bitmap[index + i] & mask)
395                                 goto done;
396                 }
397                 ret = 1;
398                 break;
399 
400         case REG_OP_ALLOC:
401                 for (i = 0; i < nlongs_reg; i++)
402                         bitmap[index + i] |= mask;
403                 break;
404 
405         case REG_OP_RELEASE:
406                 for (i = 0; i < nlongs_reg; i++)
407                         bitmap[index + i] &= ~mask;
408                 break;
409         }
410 done:
411         return ret;
412 }
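
/*
 * linux_reg_op() operates on an aligned "region" of 1 << order bits
 * starting at bit "pos" (pos is expected to be a multiple of the region
 * size): REG_OP_ISFREE returns 1 when every bit in the region is clear,
 * REG_OP_ALLOC sets the region, and REG_OP_RELEASE clears it.  It is the
 * worker behind the Linux-style bitmap_*_region() helpers.  Illustrative
 * call, not part of the original header:
 *
 *	linux_reg_op(bm, 8, 2, REG_OP_ISFREE);	// tests bits 8..11
 */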

#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
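
/*
 * Illustrative iteration (not part of the original header): visit every
 * set bit in a 128-bit map.
 *
 *	unsigned long b;
 *
 *	for_each_set_bit(b, bm, 128)
 *		printf("bit %lu is set\n", b);
 */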

static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}

static inline uint32_t
sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return ((int32_t)(value << shift) >> shift);
}
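
/*
 * "index" names the bit position holding the sign.  Illustrative values
 * (not part of the original header):
 *
 *	sign_extend32(0x80, 7) == 0xffffff80	(bit 7 set: negative)
 *	sign_extend32(0x7f, 7) == 0x0000007f	(bit 7 clear: unchanged)
 */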

#endif	/* _LINUXKPI_LINUX_BITOPS_H_ */