/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/bitops.h>
#include <linux/wordpart.h>
#include <asm/asm-extable.h>
#include <asm/bitsperlong.h>

struct word_at_a_time {
	const unsigned long bits;
};

/* 0x7f repeated in every byte of the word. */
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x7f) }

/*
 * has_zero() already produces the final per-byte markers, so there is
 * nothing left to prepare; return the data unchanged.
 */
static inline unsigned long prep_zero_mask(unsigned long val, unsigned long data, const struct word_at_a_time *c)
{
	return data;
}

/*
 * Despite the name, this returns the bit number of the marker for the
 * first zero byte: __fls() scans from the most significant bit, which
 * on big-endian corresponds to the first byte in memory.
 */
static inline unsigned long create_zero_mask(unsigned long data)
{
	return __fls(data);
}

/* Convert that bit number into the byte offset of the first zero byte. */
static inline unsigned long find_zero(unsigned long data)
{
	return (data ^ (BITS_PER_LONG - 1)) >> 3;
}

/*
 * Set bit 7 in *data for every zero byte in val and return the result,
 * which is non-zero if and only if val contains at least one zero byte.
 */
static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
	unsigned long mask = (val & c->bits) + c->bits;

	*data = ~(mask | val | c->bits);
	return *data;
}

/*
 * Given the bit number returned by create_zero_mask(), build a mask
 * covering all bytes that precede the first zero byte.
 */
static inline unsigned long zero_bytemask(unsigned long data)
{
	return ~1UL << data;
}

/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long data;

	asm volatile(
		"0:	lg	%[data],0(%[addr])\n"
		"1:	nopr	%%r7\n"
		EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
		EX_TABLE_ZEROPAD(1b, 1b, %[data], %[addr])
		: [data] "=d" (data)
		: [addr] "a" (addr), "m" (*(unsigned long *)addr));
	return data;
}

#endif /* _ASM_WORD_AT_A_TIME_H */
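
/*
 * Example of the common calling sequence (an illustrative sketch only, not
 * part of this header): example_strnlen() is a hypothetical caller that
 * mirrors how string helpers combine has_zero(), prep_zero_mask(),
 * create_zero_mask() and find_zero(). It assumes the buffer may safely be
 * read one full word at a time; real callers also handle a trailing
 * partial word byte-wise.
 *
 *	static size_t example_strnlen(const char *s, size_t max)
 *	{
 *		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *		size_t res = 0;
 *
 *		while (max >= sizeof(unsigned long)) {
 *			unsigned long c, data;
 *
 *			c = *(const unsigned long *)(s + res);
 *			if (has_zero(c, &data, &constants)) {
 *				data = prep_zero_mask(c, data, &constants);
 *				data = create_zero_mask(data);
 *				return res + find_zero(data);
 *			}
 *			res += sizeof(unsigned long);
 *			max -= sizeof(unsigned long);
 *		}
 *		return res;
 *	}
 */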