Lines Matching +full:x +full:- +full:mask +full:-

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * big-endian system because, unlike little endian, the number of each
22 * There are a few little-endian macros used mostly for filesystem
24 * byte-oriented:
27 * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
28 * number field needs to be reversed compared to the big-endian bit
42 #include <asm/asm-compat.h>
46 #define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
48 #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
54 #define PPC_BITLSHIFT32(be) (32 - 1 - (be))
56 #define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
58 #define PPC_BITLSHIFT8(be) (8 - 1 - (be))
60 #define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
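PPC_BIT() and PPC_BITMASK() use IBM bit numbering, where bit 0 is the most significant bit, so PPC_BITMASK(bs, be) sets every bit from bs through be inclusive with bs <= be. A self-contained restatement of the 64-bit macros with worked values (assuming a 64-bit unsigned long; the driver is illustrative only):

/* Standalone restatement of the mask macros for illustration,
 * assuming 64-bit unsigned long and IBM (MSB = bit 0) numbering. */
#include <stdio.h>

#define BITS_PER_LONG_DEMO	64
#define PPC_BITLSHIFT(be)	(BITS_PER_LONG_DEMO - 1 - (be))
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
	/* IBM bit 0 is the most significant bit of the doubleword. */
	printf("PPC_BIT(0)          = 0x%016lx\n", PPC_BIT(0));		/* 0x8000000000000000 */
	printf("PPC_BITMASK(0, 3)   = 0x%016lx\n", PPC_BITMASK(0, 3));	/* 0xf000000000000000 */
	printf("PPC_BITMASK(60, 63) = 0x%016lx\n", PPC_BITMASK(60, 63));	/* 0x000000000000000f */
	return 0;
}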
66 static inline void fn(unsigned long mask, \
76 "bne- 1b\n" \
78 : "rK" (mask), "r" (p) \
85 static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
87 if (!x)
89 if (x & 1)
90 x = ~x; // make the mask non-wrapping
91 x += x & -x; // adding the low set bit results in at most one bit set
93 return !(x & (x - 1));
97 static inline void fn(unsigned long mask, volatile unsigned long *_p) \
103 __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
109 "bne- 1b\n" \
111 : "n" (~mask), "r" (p) \
119 "bne- 1b\n" \
121 : "r" (mask), "r" (p) \
153 unsigned long mask, \
163 "bne- 1b\n" \
166 : "rK" (mask), "r" (p), "n" (eh) \
168 return (old & mask); \
178 static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
184 __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
190 "bne- 1b\n"
193 : "n" (~mask), "r" (p)
201 "bne- 1b\n"
204 : "r" (mask), "r" (p)
208 return (old & mask);
236 static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
246 "bne- 1b\n"
248 : "r" (mask), "r" (p)
255 #include <asm-generic/bitops/non-atomic.h>
264 * Return the zero-based bit position (LE, not IBM bit numbering) of
265 * the most significant 1-bit in a double word.
267 #define __ilog2(x) ilog2(x)
269 #include <asm-generic/bitops/ffz.h>
271 #include <asm-generic/bitops/builtin-__ffs.h>
273 #include <asm-generic/bitops/builtin-ffs.h>
276 * fls: find last (most-significant) bit set.
279 static __always_inline int fls(unsigned int x)
283 if (__builtin_constant_p(x))
284 return x ? 32 - __builtin_clz(x) : 0;
285 asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
286 return 32 - lz;
289 #include <asm-generic/bitops/builtin-__fls.h>
292 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
293 * instruction; for 32-bit we use the generic version, which does two
294 * 32-bit fls calls.
297 static __always_inline int fls64(__u64 x)
301 if (__builtin_constant_p(x))
302 return x ? 64 - __builtin_clzll(x) : 0;
303 asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
304 return 64 - lz;
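fls() and fls64() return the 1-based position of the most significant set bit, with an input of 0 mapping to 0: the word variant is 32 minus the leading-zero count from cntlzw, the doubleword variant is 64 minus the count from cntlzd, and the __builtin_constant_p branches let the compiler fold constant arguments; per the comment above, 32-bit builds fall back to the generic fls64() built from two fls() calls. A portable sketch of the same contract using only the GCC/Clang builtins (hypothetical demo_ names):

/* Portable sketch of the fls()/fls64() contract: 1-based MSB position,
 * 0 for a zero argument, using compiler builtins instead of cntlzw/cntlzd. */
static int demo_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int demo_fls64(unsigned long long x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* demo_fls(0) == 0, demo_fls(0x80000000) == 32, demo_fls64(1ULL << 40) == 41 */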
307 #include <asm-generic/bitops/fls64.h>
315 #include <asm-generic/bitops/const_hweight.h>
317 #include <asm-generic/bitops/hweight.h>
321 #include <asm-generic/bitops/instrumented-atomic.h>
322 #include <asm-generic/bitops/instrumented-lock.h>
324 /* Little-endian versions */
325 #include <asm-generic/bitops/le.h>
329 #include <asm-generic/bitops/ext2-atomic-setbit.h>
331 #include <asm-generic/bitops/sched.h>