xref: /freebsd/sys/compat/linuxkpi/common/include/linux/bitops.h (revision a134ebd6e63f658f2d3d04ac0c60d23bcaa86dd7)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_BITOPS_H_
32 #define	_LINUX_BITOPS_H_
33 
34 #include <sys/param.h>
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
38 #include <sys/libkern.h>
39 
/* Single-bit masks: BIT() as unsigned long, BIT_ULL() as unsigned long long. */
#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
/* Width of "unsigned long": 64 on LP64 platforms, 32 otherwise. */
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITS_PER_LONG_LONG	64

/* Mask of the bits at and above bit (start) within its containing word. */
#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
/* Mask of the low (n) bits of a word; (n) must be in [1, BITS_PER_LONG]. */
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
/* Number of longs needed to hold an (n)-bit bitmap. */
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
/* Mask for bit (nr) within its containing word. */
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
/* Index of the word containing bit (nr). */
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
/* Contiguous mask with bits [l, h] (inclusive) set; requires h >= l. */
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	GENMASK_ULL(h, l)	(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define	BITS_PER_BYTE		8
#define	BITS_PER_TYPE(t)	(sizeof(t) * BITS_PER_BYTE)

/* Population count (number of set bits), mapped to FreeBSD's bitcount*(). */
#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
65 
/*
 * Find first set bit, 0-based (Linux convention).  Returns -1 when mask
 * is 0, which falls out of FreeBSD's 1-based ffs() (Linux leaves the
 * zero case undefined).
 */
static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}
71 
/*
 * Find last (most-significant) set bit, 0-based.  Returns -1 when mask
 * is 0 (fls() returns 0 in that case).
 */
static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}
77 
/* As __ffs(), but for a long: 0-based index of the first set bit, -1 if 0. */
static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}
83 
/* As __fls(), but for a long: 0-based index of the last set bit, -1 if 0. */
static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}
89 
/*
 * Find last set bit in a 64-bit value, 1-based like fls(); returns 0 when
 * mask is 0 (matches the Linux fls64() contract).
 */
static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}
95 
/*
 * Rotate a 32-bit word right by 'shift' bits.
 *
 * The shift count is reduced modulo 32: the previous form evaluated
 * "word << (32 - shift)", which for shift == 0 (or any multiple of 32)
 * is a shift by the full type width — undefined behavior in C.  This is
 * the branch-free formulation used by Linux's ror32().
 */
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	return ((word >> (shift & 31)) | (word << ((-shift) & 31)));
}
101 
102 #define	ffz(mask)	__ffs(~(mask))
103 
/*
 * Return the order (base-2 logarithm, rounded up) of 'count', i.e. the
 * smallest 'order' such that (1 << order) >= count.  Returns -1 for
 * count == 0 because fls(0) == 0, matching the Linux helper.
 *
 * Reformatted to the file's conventions (separate type line, tab indent,
 * parenthesized return); logic unchanged.
 */
static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	/* Any value that is not a power of two rounds up one order. */
	if (count & (count - 1))
		order++;
	return (order);
}
113 
114 static inline unsigned long
115 find_first_bit(const unsigned long *addr, unsigned long size)
116 {
117 	long mask;
118 	int bit;
119 
120 	for (bit = 0; size >= BITS_PER_LONG;
121 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
122 		if (*addr == 0)
123 			continue;
124 		return (bit + __ffsl(*addr));
125 	}
126 	if (size) {
127 		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
128 		if (mask)
129 			bit += __ffsl(mask);
130 		else
131 			bit += size;
132 	}
133 	return (bit);
134 }
135 
136 static inline unsigned long
137 find_first_zero_bit(const unsigned long *addr, unsigned long size)
138 {
139 	long mask;
140 	int bit;
141 
142 	for (bit = 0; size >= BITS_PER_LONG;
143 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
144 		if (~(*addr) == 0)
145 			continue;
146 		return (bit + __ffsl(~(*addr)));
147 	}
148 	if (size) {
149 		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
150 		if (mask)
151 			bit += __ffsl(mask);
152 		else
153 			bit += size;
154 	}
155 	return (bit);
156 }
157 
158 static inline unsigned long
159 find_last_bit(const unsigned long *addr, unsigned long size)
160 {
161 	long mask;
162 	int offs;
163 	int bit;
164 	int pos;
165 
166 	pos = size / BITS_PER_LONG;
167 	offs = size % BITS_PER_LONG;
168 	bit = BITS_PER_LONG * pos;
169 	addr += pos;
170 	if (offs) {
171 		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
172 		if (mask)
173 			return (bit + __flsl(mask));
174 	}
175 	while (pos--) {
176 		addr--;
177 		bit -= BITS_PER_LONG;
178 		if (*addr)
179 			return (bit + __flsl(*addr));
180 	}
181 	return (size);
182 }
183 
184 static inline unsigned long
185 find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
186 {
187 	long mask;
188 	int offs;
189 	int bit;
190 	int pos;
191 
192 	if (offset >= size)
193 		return (size);
194 	pos = offset / BITS_PER_LONG;
195 	offs = offset % BITS_PER_LONG;
196 	bit = BITS_PER_LONG * pos;
197 	addr += pos;
198 	if (offs) {
199 		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
200 		if (mask)
201 			return (bit + __ffsl(mask));
202 		if (size - bit <= BITS_PER_LONG)
203 			return (size);
204 		bit += BITS_PER_LONG;
205 		addr++;
206 	}
207 	for (size -= bit; size >= BITS_PER_LONG;
208 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
209 		if (*addr == 0)
210 			continue;
211 		return (bit + __ffsl(*addr));
212 	}
213 	if (size) {
214 		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
215 		if (mask)
216 			bit += __ffsl(mask);
217 		else
218 			bit += size;
219 	}
220 	return (bit);
221 }
222 
223 static inline unsigned long
224 find_next_zero_bit(const unsigned long *addr, unsigned long size,
225     unsigned long offset)
226 {
227 	long mask;
228 	int offs;
229 	int bit;
230 	int pos;
231 
232 	if (offset >= size)
233 		return (size);
234 	pos = offset / BITS_PER_LONG;
235 	offs = offset % BITS_PER_LONG;
236 	bit = BITS_PER_LONG * pos;
237 	addr += pos;
238 	if (offs) {
239 		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
240 		if (mask)
241 			return (bit + __ffsl(mask));
242 		if (size - bit <= BITS_PER_LONG)
243 			return (size);
244 		bit += BITS_PER_LONG;
245 		addr++;
246 	}
247 	for (size -= bit; size >= BITS_PER_LONG;
248 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
249 		if (~(*addr) == 0)
250 			continue;
251 		return (bit + __ffsl(~(*addr)));
252 	}
253 	if (size) {
254 		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
255 		if (mask)
256 			bit += __ffsl(mask);
257 		else
258 			bit += size;
259 	}
260 	return (bit);
261 }
262 
/*
 * Single-bit operations on an array of longs, built on FreeBSD's
 * atomic_set_long()/atomic_clear_long().  Note that in this
 * implementation the "__"-prefixed variants (non-atomic in Linux) are
 * simply aliases for the atomic ones.
 */
#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

/* Test bit (i); READ_ONCE() forces a single, non-cached load of the word. */
#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
277 
/*
 * Clear a bit with unlock (release) semantics.
 *
 * NOTE(review): Linux's clear_bit_unlock() places the release barrier
 * *before* the clear so that prior stores are ordered before the bit is
 * observed clear; here the wmb() follows clear_bit().  Confirm this
 * ordering is sufficient on all supported architectures.
 */
static inline void
clear_bit_unlock(long bit, volatile unsigned long *var)
{
	clear_bit(bit, var);
	wmb();
}
284 
285 static inline int
286 test_and_clear_bit(long bit, volatile unsigned long *var)
287 {
288 	long val;
289 
290 	var += BIT_WORD(bit);
291 	bit %= BITS_PER_LONG;
292 	bit = (1UL << bit);
293 
294 	val = *var;
295 	while (!atomic_fcmpset_long(var, &val, val & ~bit))
296 		;
297 	return !!(val & bit);
298 }
299 
300 static inline int
301 __test_and_clear_bit(long bit, volatile unsigned long *var)
302 {
303 	long val;
304 
305 	var += BIT_WORD(bit);
306 	bit %= BITS_PER_LONG;
307 	bit = (1UL << bit);
308 
309 	val = *var;
310 	*var &= ~bit;
311 
312 	return !!(val & bit);
313 }
314 
315 static inline int
316 test_and_set_bit(long bit, volatile unsigned long *var)
317 {
318 	long val;
319 
320 	var += BIT_WORD(bit);
321 	bit %= BITS_PER_LONG;
322 	bit = (1UL << bit);
323 
324 	val = *var;
325 	while (!atomic_fcmpset_long(var, &val, val | bit))
326 		;
327 	return !!(val & bit);
328 }
329 
330 static inline int
331 __test_and_set_bit(long bit, volatile unsigned long *var)
332 {
333 	long val;
334 
335 	var += BIT_WORD(bit);
336 	bit %= BITS_PER_LONG;
337 	bit = (1UL << bit);
338 
339 	val = *var;
340 	*var |= bit;
341 
342 	return !!(val & bit);
343 }
344 
/* Region operations for linux_reg_op(), mirroring Linux's __reg_op(). */
enum {
        REG_OP_ISFREE,		/* test: is the whole region clear? */
        REG_OP_ALLOC,		/* set every bit in the region */
        REG_OP_RELEASE,		/* clear every bit in the region */
};
350 
351 static inline int
352 linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
353 {
354         int nbits_reg;
355         int index;
356         int offset;
357         int nlongs_reg;
358         int nbitsinlong;
359         unsigned long mask;
360         int i;
361         int ret = 0;
362 
363         nbits_reg = 1 << order;
364         index = pos / BITS_PER_LONG;
365         offset = pos - (index * BITS_PER_LONG);
366         nlongs_reg = BITS_TO_LONGS(nbits_reg);
367         nbitsinlong = min(nbits_reg,  BITS_PER_LONG);
368 
369         mask = (1UL << (nbitsinlong - 1));
370         mask += mask - 1;
371         mask <<= offset;
372 
373         switch (reg_op) {
374         case REG_OP_ISFREE:
375                 for (i = 0; i < nlongs_reg; i++) {
376                         if (bitmap[index + i] & mask)
377                                 goto done;
378                 }
379                 ret = 1;
380                 break;
381 
382         case REG_OP_ALLOC:
383                 for (i = 0; i < nlongs_reg; i++)
384                         bitmap[index + i] |= mask;
385                 break;
386 
387         case REG_OP_RELEASE:
388                 for (i = 0; i < nlongs_reg; i++)
389                         bitmap[index + i] &= ~mask;
390                 break;
391         }
392 done:
393         return ret;
394 }
395 
/* Iterate 'bit' over each set bit in the first 'size' bits of 'addr'. */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Iterate 'bit' over each clear bit in the first 'size' bits of 'addr'. */
#define	for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
405 
/*
 * Sign-extend the low (index + 1) bits of 'value' to 64 bits; bit 'index'
 * becomes the sign bit of the result.
 */
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t n;

	/* Shift the chosen sign bit up to bit 63, then arithmetic-shift back. */
	n = 63 - index;
	return ((uint64_t)((int64_t)(value << n) >> n));
}
413 
414 #endif	/* _LINUX_BITOPS_H_ */
415