xref: /freebsd/sys/compat/linuxkpi/common/include/linux/bitops.h (revision 59c8e88e72633afbc47a4ace0d2170d00d51f7dc)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_BITOPS_H_
#define	_LINUXKPI_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITS_PER_LONG_LONG	64

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8
#define	BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)
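
/*
 * Example (editor's illustration, not part of the original header):
 * GENMASK(h, l) builds a mask covering bits h..l inclusive, so
 * GENMASK(7, 4) == 0xf0 and BIT(3) == 0x08.  A hypothetical
 * register-field extraction might read:
 *
 *	uint32_t reg = 0x12345678;
 *	uint32_t field = (reg & GENMASK(15, 8)) >> 8;	// field == 0x56
 */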

#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
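
/*
 * Example (editor's illustration): the hweight*() family returns the
 * population count ("Hamming weight") of its argument, mapped directly
 * onto FreeBSD's native bitcount*() routines, e.g.:
 *
 *	hweight32(0xf0f0) == 8;
 */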

#define	HWEIGHT8(x)	(bitcount8((uint8_t)(x)) + 1)
#define	HWEIGHT16(x)	(bitcount16(x) + 1)
#define	HWEIGHT32(x)	(bitcount32(x) + 1)
#define	HWEIGHT64(x)	(bitcount64(x) + 1)

static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline unsigned long
__ffs64(uint64_t mask)
{
	return (ffsll(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}
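
/*
 * Note (editor's addition): ffs()/fls() are 1-origin, so the wrappers
 * above subtract one to match Linux's 0-origin __ffs()/__fls().  One
 * consequence worth knowing is that passing 0 yields -1 here, whereas
 * Linux leaves __ffs(0)/__fls(0) undefined, e.g.:
 *
 *	__ffs(0x18) == 3;	__fls(0x18) == 4;	__ffs(0) == -1;
 */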

static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	/* Note: a shift count of 0 (or >= 32) is undefined behaviour here. */
	return ((word >> shift) | (word << (32 - shift)));
}

#define	ffz(mask)	__ffs(~(mask))

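/*
 * Example (editor's illustration): ror32() rotates right, ffz() finds
 * the first zero bit:
 *
 *	ror32(0x00000001, 4) == 0x10000000;
 *	ffz(0x0000000f) == 4;
 */
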
static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return (order);
}
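
/*
 * Example (editor's illustration): get_count_order() is ceil(log2(count)),
 * i.e. the order of the smallest power of two >= count:
 *
 *	get_count_order(16) == 4;	get_count_order(17) == 5;
 */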

/*
 * The find_*_bit() helpers below scan a bitmap of "size" bits and
 * return the index of the matching bit, or "size" when no bit matches.
 */
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}

static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
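
/*
 * Usage sketch (editor's illustration, hypothetical values): find the
 * first free slot in a 128-bit allocation map, then the next used slot
 * after it.  A return value equal to the size (here 128) means "not
 * found":
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 3 };	// bits 0 and 1 set
 *	unsigned long free_slot, used_slot;
 *
 *	free_slot = find_first_zero_bit(map, 128);	// 2
 *	used_slot = find_next_bit(map, 128, free_slot);	// 128 (none left)
 */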

/*
 * Note: unlike Linux, where the double-underscore variants are the
 * non-atomic versions, __set_bit() and __clear_bit() are implemented
 * with the same atomics as set_bit() and clear_bit() in this header.
 */
#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit_unlock(i, a)						\
    atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
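
/*
 * Example (editor's illustration): basic flag manipulation on a
 * hypothetical feature bitmap:
 *
 *	unsigned long features[BITS_TO_LONGS(64)] = { 0 };
 *
 *	set_bit(5, features);
 *	if (test_bit(5, features))
 *		clear_bit(5, features);
 */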

static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}

/* Non-atomic variant; the caller must provide its own serialization. */
static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}

/* Non-atomic variant; the caller must provide its own serialization. */
static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}
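
/*
 * Usage sketch (editor's illustration): test_and_set_bit() is commonly
 * used as a one-shot guard; the first caller to set the (hypothetical)
 * flag wins:
 *
 *	static unsigned long state[BITS_TO_LONGS(8)];
 *	#define	INITED	0
 *
 *	if (!test_and_set_bit(INITED, state)) {
 *		// first caller: perform one-time initialization
 *	}
 */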

enum {
	REG_OP_ISFREE,
	REG_OP_ALLOC,
	REG_OP_RELEASE,
};

static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;
	int index;
	int offset;
	int nlongs_reg;
	int nbitsinlong;
	unsigned long mask;
	int i;
	int ret = 0;

	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);

	/* Build a mask of nbitsinlong consecutive bits without overflowing. */
	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return (ret);
}
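
/*
 * Usage sketch (editor's illustration): linux_reg_op() operates on an
 * aligned region of 1 << order bits, in the style of Linux's __reg_op();
 * a hypothetical caller allocating a 4-bit region at bit 8 might do:
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0 };
 *
 *	if (linux_reg_op(map, 8, 2, REG_OP_ISFREE))
 *		linux_reg_op(map, 8, 2, REG_OP_ALLOC);
 *	...
 *	linux_reg_op(map, 8, 2, REG_OP_RELEASE);
 */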

#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
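
/*
 * Usage sketch (editor's illustration, hypothetical bitmap):
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *	unsigned long b;
 *
 *	set_bit(3, map);
 *	set_bit(65, map);
 *	for_each_set_bit(b, map, 128)
 *		printf("bit %lu is set\n", b);	// prints 3, then 65
 */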

static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}

static inline uint32_t
sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return ((int32_t)(value << shift) >> shift);
}
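
/*
 * Example (editor's illustration): "index" is the position of the sign
 * bit in the narrow value, e.g. extending a 12-bit two's-complement
 * field:
 *
 *	sign_extend32(0xfff, 11) == 0xffffffff;	// -1 as a 12-bit value
 *	sign_extend32(0x7ff, 11) == 0x7ff;	// positive, unchanged
 */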

#endif	/* _LINUXKPI_LINUX_BITOPS_H_ */