/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999,2013
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first. Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 * |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 * |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bit 0-63 in the bit number field needs to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * XOR with 0x3f.
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>

/*
 * The plain (non-atomic) bit operations need no architecture-specific
 * handling; map them directly to the generic implementations.
 */
#define arch___set_bit generic___set_bit
#define arch___clear_bit generic___clear_bit
#define arch___change_bit generic___change_bit
#define arch___test_and_set_bit generic___test_and_set_bit
#define arch___test_and_clear_bit generic___test_and_clear_bit
#define arch___test_and_change_bit generic___test_and_change_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/*
 * arch_test_bit - test whether a bit in a bitmap is set
 * @nr:  LSB0 bit number (may exceed BITS_PER_LONG for multi-word bitmaps)
 * @ptr: start of the bitmap
 *
 * For a compile-time constant bit number emit a single TEST UNDER MASK
 * (TM) instruction instead of falling back to generic_test_bit().
 * This requires compiler support for flag output constraints ("=@cc").
 */
static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
#ifdef __HAVE_ASM_FLAG_OUTPUTS__
	const volatile unsigned char *addr;
	unsigned long mask;
	int cc;

	/*
	 * With CONFIG_PROFILE_ALL_BRANCHES enabled gcc fails to
	 * handle __builtin_constant_p() in some cases.
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && __builtin_constant_p(nr)) {
		/*
		 * Locate the byte containing bit @nr: XOR with
		 * BITS_PER_LONG - BITS_PER_BYTE (i.e. 56) mirrors the byte
		 * index within each big-endian word, then divide by 8 to
		 * get the byte offset. The mask selects the bit within
		 * that byte.
		 */
		addr = (const volatile unsigned char *)ptr;
		addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
		mask = 1UL << (nr & (BITS_PER_BYTE - 1));
		/*
		 * TM sets condition code 3 when all of the selected
		 * (single) mask bits are ones, i.e. the bit is set.
		 */
		asm volatile(
			" tm %[addr],%[mask]\n"
			: "=@cc" (cc)
			: [addr] "Q" (*addr), [mask] "I" (mask)
			);
		return cc == 3;
	}
#endif
	return generic_test_bit(nr, ptr);
}

#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/lock.h>

/*
 * Functions which use MSB0 bit numbering.
81 * The bits are numbered: 82 * |0..............63|64............127|128...........191|192...........255| 83 */ 84 unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); 85 unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, 86 unsigned long offset); 87 88 #define for_each_set_bit_inv(bit, addr, size) \ 89 for ((bit) = find_first_bit_inv((addr), (size)); \ 90 (bit) < (size); \ 91 (bit) = find_next_bit_inv((addr), (size), (bit) + 1)) 92 93 static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr) 94 { 95 return set_bit(nr ^ (BITS_PER_LONG - 1), ptr); 96 } 97 98 static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) 99 { 100 return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 101 } 102 103 static inline bool test_and_clear_bit_inv(unsigned long nr, 104 volatile unsigned long *ptr) 105 { 106 return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 107 } 108 109 static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr) 110 { 111 return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr); 112 } 113 114 static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) 115 { 116 return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); 117 } 118 119 static inline bool test_bit_inv(unsigned long nr, 120 const volatile unsigned long *ptr) 121 { 122 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 123 } 124 125 #ifndef CONFIG_CC_HAS_BUILTIN_FFS 126 127 /** 128 * __flogr - find leftmost one 129 * @word - The word to search 130 * 131 * Returns the bit number of the most significant bit set, 132 * where the most significant bit has bit number 0. 133 * If no bit is set this function returns 64. 
 */
static __always_inline __attribute_const__ unsigned long __flogr(unsigned long word)
{
	unsigned long bit;

	if (__builtin_constant_p(word)) {
		/*
		 * Compile-time constant: binary search over successively
		 * smaller halves so the whole computation folds to a
		 * constant. Each step shifts the remaining value into the
		 * high half and accounts for the skipped zero bits.
		 */
		bit = 0;
		if (!word)
			return 64;
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		/*
		 * FIND LEFTMOST ONE (FLOGR) operates on an even/odd
		 * register pair; the even register receives the result.
		 */
		union register_pair rp __uninitialized;

		rp.even = word;
		asm("flogr %[rp],%[rp]"
			: [rp] "+d" (rp.pair) : : "cc");
		bit = rp.even;
		/*
		 * The result of the flogr instruction is a value in the range
		 * of 0..64. Let the compiler know that the AND operation can
		 * be optimized away.
		 */
		__assume(bit <= 64);
		return bit & 127;
	}
}

/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
191 */ 192 static __always_inline __flatten __attribute_const__ int ffs(int word) 193 { 194 unsigned int val = (unsigned int)word; 195 196 return BITS_PER_LONG - __flogr(-val & val); 197 } 198 199 #else /* CONFIG_CC_HAS_BUILTIN_FFS */ 200 201 #include <asm-generic/bitops/builtin-ffs.h> 202 203 #endif /* CONFIG_CC_HAS_BUILTIN_FFS */ 204 205 #include <asm-generic/bitops/builtin-__ffs.h> 206 #include <asm-generic/bitops/ffz.h> 207 #include <asm-generic/bitops/builtin-__fls.h> 208 #include <asm-generic/bitops/builtin-fls.h> 209 #include <asm-generic/bitops/fls64.h> 210 #include <asm/arch_hweight.h> 211 #include <asm-generic/bitops/const_hweight.h> 212 #include <asm-generic/bitops/sched.h> 213 #include <asm-generic/bitops/le.h> 214 #include <asm-generic/bitops/ext2-atomic-setbit.h> 215 216 #endif /* _S390_BITOPS_H */ 217