/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_BITOPS_H_
#define	_LINUXKPI_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITS_PER_LONG_LONG	64

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8
#define	BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)
#define	BITS_TO_BYTES(n)	howmany((n), BITS_PER_BYTE)
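
/*
 * Example values (illustrative, not part of the KPI); each follows
 * directly from the definitions above:
 *
 *	BIT(3)			== 0x8UL
 *	GENMASK(7, 4)		== 0xf0UL	(bits 7..4 inclusive)
 *	GENMASK_ULL(39, 32)	== 0xff00000000ULL
 *	BITS_TO_LONGS(65)	== 2 on LP64, 3 on ILP32
 *	BIT_WORD(70)		== 1 on LP64, 2 on ILP32
 */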

#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
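
/*
 * For instance (illustrative): hweight8(0xf0) == 4, hweight16(0x0101) == 2
 * and hweight64(~0ULL) == 64; "hweight" is the Hamming weight, i.e. the
 * number of set bits.
 */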

/*
 * Linux's HWEIGHT*() macros are the constant-expression counterparts of
 * hweight*(); in this KPI they compute the same population count.
 */
#define	HWEIGHT8(x)	hweight8(x)
#define	HWEIGHT16(x)	hweight16(x)
#define	HWEIGHT32(x)	hweight32(x)
#define	HWEIGHT64(x)	hweight64(x)

/*
 * Bit scanning: the double-underscore variants return 0-based bit
 * indices and, as on Linux, their result is undefined when the argument
 * is zero.  fls64() keeps the 1-based ffs()/fls() convention and
 * returns 0 when no bit is set.
 */
static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline unsigned long
__ffs64(uint64_t mask)
{
	return (ffsll(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}

static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	/*
	 * Mask both shift counts so that a rotation by 0 (or any
	 * multiple of 32) is well-defined instead of shifting by 32.
	 */
	return ((word >> (shift & 31)) | (word << ((-shift) & 31)));
}

#define	ffz(mask)	__ffs(~(mask))
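
/*
 * Illustrative values for the bit-scan helpers above:
 *
 *	__ffs(0x10) == 4	(lowest set bit, 0-based)
 *	__fls(0x10) == 4	(highest set bit, 0-based)
 *	fls64(0x10) == 5	(1-based; fls64(0) == 0)
 *	ffz(0x0f)   == 4	(lowest clear bit, 0-based)
 */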

static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return (order);
}
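
/*
 * E.g. (illustrative) get_count_order(4) == 2 and get_count_order(5) == 3:
 * the result is log2 of the smallest power of two that can hold "count".
 */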

/* Returns the index of the first set bit, or "size" if no bit is set. */
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

/* Returns the index of the first clear bit, or "size" if all are set. */
static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

/* Returns the index of the last set bit, or "size" if no bit is set. */
static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}
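
/*
 * The find_*_bit() functions all return "size" when nothing matches;
 * e.g. (illustrative), for an all-zero 128-bit bitmap "map":
 *
 *	find_first_bit(map, 128)	== 128
 *	find_first_zero_bit(map, 128)	== 0
 *	find_last_bit(map, 128)		== 128
 */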

/* Returns the first set bit at or after "offset", or "size" if none. */
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

/* Returns the first clear bit at or after "offset", or "size" if none. */
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
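
/*
 * A typical scan (illustrative; "map", "nbits" and "start" are
 * hypothetical):
 *
 *	for (bit = find_next_bit(map, nbits, start); bit < nbits;
 *	    bit = find_next_bit(map, nbits, bit + 1))
 *		...;
 *
 * The for_each_set_bit() macro below performs the same scan starting
 * from find_first_bit().
 */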

/*
 * Note that, unlike on Linux, __set_bit() and __clear_bit() are
 * implemented with the same atomic(9) operations as set_bit() and
 * clear_bit().
 */
#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit_unlock(i, a)						\
    atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))

static inline void
__assign_bit(long bit, volatile unsigned long *addr, bool value)
{
	if (value)
		__set_bit(bit, addr);
	else
		__clear_bit(bit, addr);
}
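
/*
 * Usage sketch (illustrative): these operate on bitmaps laid out as
 * arrays of longs, e.g. a hypothetical 128-bit map:
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *	set_bit(70, map);
 *	if (test_bit(70, map))
 *		clear_bit(70, map);
 */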

/* Atomically clear the bit; returns its previous value. */
static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}

/* Non-atomic version of test_and_clear_bit(). */
static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

/* Atomically set the bit; returns its previous value. */
static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}

/* Non-atomic version of test_and_set_bit(). */
static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}
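
/*
 * test_and_set_bit() returns the previous value of the bit, so e.g.
 * (illustrative, "flags" being a hypothetical unsigned long bitmap):
 *
 *	if (test_and_set_bit(0, &flags) == 0)
 *		... bit 0 was clear and has now been claimed ...
 */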

enum {
	REG_OP_ISFREE,
	REG_OP_ALLOC,
	REG_OP_RELEASE,
};

static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;
	int index;
	int offset;
	int nlongs_reg;
	int nbitsinlong;
	unsigned long mask;
	int i;
	int ret = 0;

	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);

	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return (ret);
}
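
/*
 * linux_reg_op() mirrors Linux's __reg_op() bitmap-region helper: "pos"
 * is the first bit of a region of 1 << order bits.  A caller typically
 * tests whether a region is free and then claims it (illustrative):
 *
 *	if (linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
 *		linux_reg_op(bitmap, pos, order, REG_OP_ALLOC);
 */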

#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
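
/*
 * Example (illustrative): count the set bits in a hypothetical 128-bit
 * bitmap "map":
 *
 *	unsigned long bit;
 *	int n = 0;
 *
 *	for_each_set_bit(bit, map, 128)
 *		n++;
 */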

/* Sign-extend "value" whose sign bit sits at position "index". */
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}

static inline uint32_t
sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return ((int32_t)(value << shift) >> shift);
}
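
/*
 * E.g. (illustrative): sign_extend32(0x80, 7) == 0xffffff80, while
 * sign_extend64(0x80, 8) == 0x80 because bit 8 is clear.
 */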

#endif	/* _LINUXKPI_LINUX_BITOPS_H_ */