/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_BITOPS_H_
#define	_LINUX_BITOPS_H_

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/libkern.h>

#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define	BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define	BITS_PER_BYTE		8
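
/*
 * Example (illustrative only): GENMASK(h, l) builds a mask with bits
 * l..h inclusive set, while BIT_WORD() and BIT_MASK() locate a bit
 * within an array of longs:
 *
 *	GENMASK(7, 4)	== 0xf0UL
 *	BIT(3)		== 0x08UL
 *	BIT_WORD(68)	== 1		(on LP64: bit 68 lives in word 1)
 *	BIT_MASK(68)	== 1UL << 4	(on LP64)
 */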

#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
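
/*
 * Example (illustrative only): the hweight*() macros return the
 * population count (number of set bits): hweight8(0xf0) == 4,
 * hweight32(0x80000001) == 2.
 */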

static inline int
__ffs(int mask)
{
	return (ffs(mask) - 1);
}

static inline int
__fls(int mask)
{
	return (fls(mask) - 1);
}

static inline int
__ffsl(long mask)
{
	return (ffsl(mask) - 1);
}

static inline int
__flsl(long mask)
{
	return (flsl(mask) - 1);
}

static inline int
fls64(uint64_t mask)
{
	return (flsll(mask));
}
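
/*
 * Note (editorial): these wrappers convert FreeBSD's 1-based ffs()/fls()
 * family to Linux's 0-based __ffs()/__fls() conventions.  As in Linux,
 * the result is only meaningful for a non-zero argument (__ffs(0)
 * evaluates to -1 here).  For example, __ffs(0x18) == 3 and
 * __fls(0x18) == 4.
 */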

static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	return ((word >> shift) | (word << (32 - shift)));
}
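
/*
 * Note (editorial): ror32() rotates right, e.g. ror32(0x00000001, 1)
 * == 0x80000000.  A shift of 0 (or >= 32) makes "word << (32 - shift)"
 * undefined behaviour in C, so callers should pass shifts in 1..31.
 */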

#define	ffz(mask)	__ffs(~(mask))

static inline int
get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return (order);
}
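
/*
 * Example (illustrative only): get_count_order() returns the base-2
 * logarithm of "count" rounded up, i.e. the order of the smallest
 * power of two that holds "count" items: get_count_order(16) == 4,
 * get_count_order(17) == 5.
 */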

static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
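
/*
 * Example (illustrative only): on LP64, with
 *
 *	unsigned long map[2] = { 0, 0x10 };
 *
 * find_first_bit(map, 128) == 68, while find_first_bit(map, 64) == 64:
 * the "size" argument is returned when no bit is set in range.
 */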

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}

static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
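
/*
 * Example (illustrative only): find_next_bit() resumes a scan at
 * "offset", which is how for_each_set_bit() below walks a bitmap:
 *
 *	unsigned long map[1] = { 0x11 };
 *
 *	find_next_bit(map, 64, 0) == 0
 *	find_next_bit(map, 64, 1) == 4
 *	find_next_bit(map, 64, 5) == 64	(no further bits in range)
 */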

static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}

#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	test_bit(i, a)							\
    !!(READ_ONCE(((volatile unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
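
/*
 * Note (editorial): unlike Linux, where the double-underscore variants
 * are non-atomic, __set_bit() and __clear_bit() map to the same atomic
 * operations as set_bit() and clear_bit() in this implementation.
 * Typical use (illustrative only):
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *	set_bit(68, map);
 *	if (test_bit(68, map))
 *		clear_bit(68, map);
 */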

static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);
	do {
		val = *var;
	} while (atomic_cmpset_long(var, val, val & ~bit) == 0);

	return !!(val & bit);
}

static inline int
__test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var &= ~bit;

	return !!(val & bit);
}

static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);
	do {
		val = *var;
	} while (atomic_cmpset_long(var, val, val | bit) == 0);

	return !!(val & bit);
}

static inline int
__test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	val = *var;
	*var |= bit;

	return !!(val & bit);
}
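
/*
 * Example (illustrative only): the test_and_*() helpers atomically
 * update a bit and report its previous value, which makes a simple
 * claim/release scheme possible:
 *
 *	if (test_and_set_bit(i, map) == 0)
 *		...		bit i was clear and is now ours
 *	...
 *	clear_bit(i, map);
 */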

enum {
	REG_OP_ISFREE,
	REG_OP_ALLOC,
	REG_OP_RELEASE,
};

static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;
	int index;
	int offset;
	int nlongs_reg;
	int nbitsinlong;
	unsigned long mask;
	int i;
	int ret = 0;

	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = min(nbits_reg, BITS_PER_LONG);

	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return (ret);
}
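
/*
 * Example (illustrative only): linux_reg_op() treats the bitmap as an
 * array of aligned, power-of-two "regions" of 1 << order bits each.
 * To claim and later release the 8-bit region starting at bit 16:
 *
 *	if (linux_reg_op(map, 16, 3, REG_OP_ISFREE))
 *		linux_reg_op(map, 16, 3, REG_OP_ALLOC);
 *	...
 *	linux_reg_op(map, 16, 3, REG_OP_RELEASE);
 */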

#define	for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
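
/*
 * Example (illustrative only):
 *
 *	unsigned long bit;
 *
 *	for_each_set_bit(bit, map, 128)
 *		printf("bit %lu is set\n", bit);
 */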
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	uint8_t shift = 63 - index;

	return ((int64_t)(value << shift) >> shift);
}
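
/*
 * Example (illustrative only): sign_extend64() sign-extends from bit
 * "index" (the sign bit) upward:
 *
 *	sign_extend64(0x80, 7) == 0xffffffffffffff80ULL
 *	sign_extend64(0x7f, 7) == 0x7fULL
 */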

#endif	/* _LINUX_BITOPS_H_ */