1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29 #ifndef _LINUXKPI_LINUX_BITOPS_H_
30 #define _LINUXKPI_LINUX_BITOPS_H_
31
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/errno.h>
36 #include <sys/libkern.h>
37
/* Single-bit masks: BIT() is unsigned-long wide, BIT_ULL() unsigned-long-long wide. */
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
/* Width, in bits, of the platform's long / long long types. */
#define BITS_PER_LONG (__SIZEOF_LONG__ * __CHAR_BIT__)
#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * __CHAR_BIT__)

/* Word mask of the bits at in-word position (start % BITS_PER_LONG) and above. */
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
/* Word mask of the n lowest bits; n must be in [1, BITS_PER_LONG]. */
#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
/* Number of longs required to hold an n-bit bitmap. */
#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
/* In-word mask and word index for bit number nr of a multi-word bitmap. */
#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
/* Contiguous mask with bits h down through l set, inclusive (requires h >= l). */
#define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define GENMASK_ULL(h, l) (((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define BITS_PER_BYTE 8
#define BITS_PER_TYPE(t) (sizeof(t) * BITS_PER_BYTE)
#define BITS_TO_BYTES(n) howmany((n), BITS_PER_BYTE)

/* Population count (number of set bits), mapped to libkern's bitcount*() helpers. */
#define hweight8(x) bitcount((uint8_t)(x))
#define hweight16(x) bitcount16(x)
#define hweight32(x) bitcount32(x)
#define hweight64(x) bitcount64(x)
#define hweight_long(x) bitcountl(x)

/* Popcount via the compiler builtin; the cast selects the operand width. */
#define HWEIGHT8(x) (__builtin_popcountg((uint8_t)(x)))
#define HWEIGHT16(x) (__builtin_popcountg((uint16_t)(x)))
#define HWEIGHT32(x) (__builtin_popcountg((uint32_t)(x)))
#define HWEIGHT64(x) (__builtin_popcountg((uint64_t)(x)))
64
/*
 * Index of the lowest set bit in `mask`, 0-based.
 * libkern's ffs() is 1-based, so subtract one; yields -1 when mask == 0.
 */
static inline int
__ffs(int mask)
{
	int first;

	first = ffs(mask);
	return (first - 1);
}
70
/*
 * Index of the highest set bit in `mask`, 0-based.
 * libkern's fls() is 1-based, so subtract one; yields -1 when mask == 0.
 */
static inline int
__fls(int mask)
{
	int last;

	last = fls(mask);
	return (last - 1);
}
76
/*
 * Index of the lowest set bit in a long `mask`, 0-based.
 * ffsl() is 1-based, so subtract one; yields -1 when mask == 0.
 */
static inline int
__ffsl(long mask)
{
	int first;

	first = ffsl(mask);
	return (first - 1);
}
82
/*
 * Index of the lowest set bit in a 64-bit `mask`, 0-based.
 * ffsll() is 1-based, so subtract one; yields (unsigned long)-1 when mask == 0.
 */
static inline unsigned long
__ffs64(uint64_t mask)
{
	int first;

	first = ffsll(mask);
	return (first - 1);
}
88
/*
 * Index of the highest set bit in a long `mask`, 0-based.
 * flsl() is 1-based, so subtract one; yields -1 when mask == 0.
 */
static inline int
__flsl(long mask)
{
	int last;

	last = flsl(mask);
	return (last - 1);
}
94
/*
 * 1-based index of the highest set bit in a 64-bit `mask`;
 * returns 0 when mask == 0 (Linux fls64() convention).
 */
static inline int
fls64(uint64_t mask)
{
	int msb;

	msb = flsll(mask);
	return (msb);
}
100
/*
 * Rotate `word` right by `shift` bit positions.
 *
 * The shift counts are masked to [0, 31]: the previous form
 * (word << (32 - shift)) was undefined behavior for shift == 0
 * (left shift by 32, i.e. by the full type width) and for
 * shift >= 32.  With masking, shifts wrap modulo 32 and
 * ror32(x, 0) == x as expected.
 */
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	/* (-shift) & 31 == (32 - shift) % 32 for unsigned shift. */
	return ((word >> (shift & 31)) | (word << ((-shift) & 31)));
}
106
/* 0-based index of the lowest clear bit in `mask` (-1 when mask is all ones). */
#define ffz(mask) __ffs(~(mask))
108
/*
 * Rounded-up log2 of `count`: exact log2 for powers of two, one more
 * otherwise.  Returns -1 for count == 0 (since fls(0) == 0).
 */
static inline int get_count_order(unsigned int count)
{
	int order = fls(count) - 1;

	/* Not a power of two (more than one bit set): round up. */
	if ((count & (count - 1)) != 0)
		order++;
	return order;
}
118
/*
 * Return the index of the lowest set bit within the first `size` bits
 * of the bitmap at `addr`, or `size` when no bit is set.
 */
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	/* Scan whole words; the first non-zero word holds the answer. */
	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	/* Partial trailing word, masked down to the remaining valid bits. */
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
140
/*
 * Return the index of the lowest clear bit within the first `size` bits
 * of the bitmap at `addr`, or `size` when every bit is set.
 * Implemented as find_first_bit() on the complemented words.
 */
static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int bit;

	/* Scan whole words; skip words that are all ones. */
	for (bit = 0; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	/* Partial trailing word, complemented and masked to valid bits. */
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
162
/*
 * Return the index of the highest set bit within the first `size` bits
 * of the bitmap at `addr`, or `size` when no bit is set.
 */
static inline unsigned long
find_last_bit(const unsigned long *addr, unsigned long size)
{
	long mask;
	int offs;
	int bit;
	int pos;

	/* Split `size` into whole words (pos) and a partial tail (offs). */
	pos = size / BITS_PER_LONG;
	offs = size % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	/* Check the partial last word first, masked to its valid bits. */
	if (offs) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __flsl(mask));
	}
	/* Then walk the whole words backwards. */
	while (pos--) {
		addr--;
		bit -= BITS_PER_LONG;
		if (*addr)
			return (bit + __flsl(*addr));
	}
	return (size);
}
188
/*
 * Return the index of the lowest set bit at index >= `offset` within
 * the first `size` bits of the bitmap at `addr`.  A return value
 * >= `size` means no such bit was found.
 */
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	/* Split the starting offset into a word index and in-word offset. */
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		/*
		 * Partial first word: discard bits below `offs`.
		 * NOTE(review): this mask is not clipped to `size`, so a set
		 * bit at index >= size in this word is returned as-is;
		 * callers must treat any result >= size as "not found".
		 */
		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	/* Scan the remaining whole words. */
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (*addr == 0)
			continue;
		return (bit + __ffsl(*addr));
	}
	/* Partial trailing word, masked to the remaining valid bits. */
	if (size) {
		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
227
/*
 * Return the index of the lowest clear bit at index >= `offset` within
 * the first `size` bits of the bitmap at `addr`.  A return value
 * >= `size` means no such bit was found.  Mirrors find_next_bit()
 * with each word complemented.
 */
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
    unsigned long offset)
{
	long mask;
	int offs;
	int bit;
	int pos;

	if (offset >= size)
		return (size);
	/* Split the starting offset into a word index and in-word offset. */
	pos = offset / BITS_PER_LONG;
	offs = offset % BITS_PER_LONG;
	bit = BITS_PER_LONG * pos;
	addr += pos;
	if (offs) {
		/*
		 * Partial first word: complement, then discard bits below
		 * `offs`.  As in find_next_bit(), the mask is not clipped to
		 * `size`; callers must treat any result >= size as "not found".
		 */
		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
		if (mask)
			return (bit + __ffsl(mask));
		if (size - bit <= BITS_PER_LONG)
			return (size);
		bit += BITS_PER_LONG;
		addr++;
	}
	/* Scan the remaining whole words, skipping all-ones words. */
	for (size -= bit; size >= BITS_PER_LONG;
	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
		if (~(*addr) == 0)
			continue;
		return (bit + __ffsl(~(*addr)));
	}
	/* Partial trailing word, complemented and masked to valid bits. */
	if (size) {
		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
		if (mask)
			bit += __ffsl(mask);
		else
			bit += size;
	}
	return (bit);
}
267
/*
 * Bitmap mutators.  Linux's double-underscore variants are nominally
 * non-atomic, but this implementation maps them to the same FreeBSD
 * atomic primitives as the plain versions.
 */
#define __set_bit(i, a) \
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define set_bit(i, a) \
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define __clear_bit(i, a) \
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define clear_bit(i, a) \
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

/* clear_bit with release ordering, for unlock-style bit usage. */
#define clear_bit_unlock(i, a) \
    atomic_clear_rel_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

/* Non-destructive test of bit i; READ_ONCE forces a single access to the word. */
#define test_bit(i, a) \
    !!(READ_ONCE(((volatile const unsigned long *)(a))[BIT_WORD(i)]) & BIT_MASK(i))
285
/*
 * Write `value` into bit `bit` of the bitmap at `addr`: set the bit
 * when true, clear it when false.  Both helpers expand to atomic
 * operations in this implementation.
 */
static inline void
__assign_bit(long bit, volatile unsigned long *addr, bool value)
{
	if (!value)
		__clear_bit(bit, addr);
	else
		__set_bit(bit, addr);
}
294
/*
 * Atomically clear bit `bit` of the bitmap at `var` and return its
 * previous value (1 if it was set, 0 otherwise).
 */
static inline int
test_and_clear_bit(long bit, volatile unsigned long *var)
{
	long val;

	/* Locate the containing word; reduce `bit` to an in-word mask. */
	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	/* CAS loop: atomic_fcmpset_long reloads `val` on failure. */
	val = *var;
	while (!atomic_fcmpset_long(var, &val, val & ~bit))
		;
	return !!(val & bit);
}
309
310 static inline int
__test_and_clear_bit(long bit,volatile unsigned long * var)311 __test_and_clear_bit(long bit, volatile unsigned long *var)
312 {
313 long val;
314
315 var += BIT_WORD(bit);
316 bit %= BITS_PER_LONG;
317 bit = (1UL << bit);
318
319 val = *var;
320 *var &= ~bit;
321
322 return !!(val & bit);
323 }
324
/*
 * Atomically set bit `bit` of the bitmap at `var` and return its
 * previous value (1 if it was already set, 0 otherwise).
 */
static inline int
test_and_set_bit(long bit, volatile unsigned long *var)
{
	long val;

	/* Locate the containing word; reduce `bit` to an in-word mask. */
	var += BIT_WORD(bit);
	bit %= BITS_PER_LONG;
	bit = (1UL << bit);

	/* CAS loop: atomic_fcmpset_long reloads `val` on failure. */
	val = *var;
	while (!atomic_fcmpset_long(var, &val, val | bit))
		;
	return !!(val & bit);
}
339
340 static inline int
__test_and_set_bit(long bit,volatile unsigned long * var)341 __test_and_set_bit(long bit, volatile unsigned long *var)
342 {
343 long val;
344
345 var += BIT_WORD(bit);
346 bit %= BITS_PER_LONG;
347 bit = (1UL << bit);
348
349 val = *var;
350 *var |= bit;
351
352 return !!(val & bit);
353 }
354
/* Operations performed by linux_reg_op() on a power-of-two bit region. */
enum {
	REG_OP_ISFREE,		/* query: is the whole region clear? */
	REG_OP_ALLOC,		/* set every bit in the region */
	REG_OP_RELEASE,		/* clear every bit in the region */
};
360
/*
 * Apply `reg_op` to a region of (1 << order) bits starting at bit
 * `pos` of `bitmap`.  Returns 1 for REG_OP_ISFREE when the region is
 * entirely clear, 0 otherwise (including for ALLOC/RELEASE).
 *
 * NOTE(review): when the region spans multiple words, the same
 * (shifted) mask is applied to every word, which is only correct if
 * `pos` is aligned to the region size — assumed, as in Linux's
 * bitmap region helpers; confirm at call sites.
 */
static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;		/* number of bits in the region */
	int index;		/* index of the region's first word in bitmap */
	int offset;		/* bit offset of the region within bitmap[index] */
	int nlongs_reg;		/* number of words spanned by the region */
	int nbitsinlong;	/* region bits contained in each spanned word */
	unsigned long mask;	/* mask applied to each spanned word */
	int i;			/* word iterator */
	int ret = 0;

	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = MIN(nbits_reg, BITS_PER_LONG);

	/*
	 * Build nbitsinlong consecutive one-bits without ever shifting by
	 * the full word width (which would be undefined behavior).
	 */
	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		/* Any set bit inside the region means "not free". */
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return ret;
}
405
/* Iterate `bit` over the index of every set bit below `size` in `addr`. */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	    (bit) < (size); \
	    (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Iterate `bit` over the index of every clear bit below `size` in `addr`. */
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size)); \
	    (bit) < (size); \
	    (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
415
/*
 * Sign-extend a 64-bit value whose sign bit is at position `index`
 * (0-based): bits above `index` are replaced with copies of bit `index`.
 */
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	/* Positions needed to move bit `index` up to the MSB. */
	uint8_t s = 63 - index;

	/* Shift up, then arithmetic-shift back to replicate the sign bit. */
	return ((uint64_t)((int64_t)(value << s) >> s));
}
423
/*
 * Sign-extend a 32-bit value whose sign bit is at position `index`
 * (0-based): bits above `index` are replaced with copies of bit `index`.
 */
static inline uint32_t
sign_extend32(uint32_t value, int index)
{
	/* Positions needed to move bit `index` up to the MSB. */
	uint8_t s = 31 - index;

	/* Shift up, then arithmetic-shift back to replicate the sign bit. */
	return ((uint32_t)((int32_t)(value << s) >> s));
}
431
432 #endif /* _LINUXKPI_LINUX_BITOPS_H_ */
433