#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

/*
 * Word-at-a-time interfaces for PowerPC.
 */

#include <linux/kernel.h>
#include <asm/asm-compat.h>

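/*
 * Callers such as strscpy() compose these helpers in a fixed pattern;
 * a minimal strlen-style sketch (illustrative only, 's' is assumed to
 * point at a NUL-terminated, readable string):
 *
 *	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 *	unsigned long val, data;
 *	size_t len = 0;
 *
 *	for (;;) {
 *		val = load_unaligned_zeropad(s + len);
 *		if (has_zero(val, &data, &constants))
 *			break;
 *		len += sizeof(unsigned long);
 *	}
 *	data = prep_zero_mask(val, data, &constants);
 *	data = create_zero_mask(data);
 *	len += find_zero(data);	(byte offset of the NUL within val)
 */
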
#ifdef __BIG_ENDIAN__

struct word_at_a_time {
	const unsigned long high_bits, low_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }

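/*
 * How the big-endian constants work (illustrative): low_bits is 0x7f
 * in every byte and high_bits is 0xfe in every byte, plus one. In
 * prep_zero_mask(), (val & low_bits) + low_bits sets bit 7 of a byte
 * iff that byte's low seven bits are non-zero, and rhs (val | low_bits)
 * contributes the byte's own bit 7; inverting the OR of the two leaves
 * exactly 0x80 in each byte of val that was zero, e.g. on 64-bit:
 *
 *	val = 0x4142430045464748  ->  returns 0x0000008000000000
 */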
/* A bit is set in each byte that contains a zero */
static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
{
	unsigned long mask = (val & c->low_bits) + c->low_bits;
	return ~(mask | rhs);
}

#define create_zero_mask(mask) (mask)

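/*
 * With 0x80 left only in the zero bytes, the first zero byte in memory
 * order is the most significant marker: for byte index n its 0x80 bit
 * sits 8 * n bits from the top, so the leading-zero count divided by
 * eight is the byte index.
 */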
static inline long find_zero(unsigned long mask)
{
	long leading_zero_bits;

	asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
	return leading_zero_bits >> 3;
}

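/*
 * Note the asymmetry with prep_zero_mask(): the carry-based sum below
 * is only reliable as a boolean. It is non-zero iff some byte of val
 * is zero, but a 0x01 byte sitting directly above a zero byte can set
 * a stray bit, so callers rebuild the exact mask from the saved rhs
 * via prep_zero_mask() before calling find_zero().
 */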
static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
	unsigned long rhs = val | c->low_bits;
	*data = rhs;
	return (val + c->high_bits) & ~rhs;
}

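/*
 * __fls(mask) is the bit position of the first zero byte's 0x80
 * marker; shifting ~1ul up to that position yields all-ones in exactly
 * the bytes that precede the terminator and zeroes from there on.
 */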
static inline unsigned long zero_bytemask(unsigned long mask)
{
	return ~1ul << __fls(mask);
}

#else

#ifdef CONFIG_64BIT

/* unused */
struct word_at_a_time {
};

#define WORD_AT_A_TIME_CONSTANTS { }

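/*
 * cmpb (Power ISA 2.05, POWER6 and later) compares its two source
 * registers byte by byte, producing 0xff where the bytes match and
 * 0x00 where they differ. Compared against zero, e.g. (illustrative):
 *
 *	a = 0x0000000000636261 ("abc")  ->  ret = 0xffffffffff000000
 */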
/* This gives us 0xff in each byte that is zero (a NUL) and 0x00 elsewhere */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long ret;
	unsigned long zero = 0;

	asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
	*bits = ret;

	return ret;
}

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

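/*
 * (bits - 1) & ~bits isolates the zero bits below the lowest set bit,
 * so with the first zero byte at index n (its 0xff group starting at
 * bit 8 * n), popcntd returns 8 * n. Despite the output variable's
 * name, this is a count of trailing, not leading, zero bits.
 */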
/* Alan Modra's little-endian strlen tail for 64-bit */
static inline unsigned long create_zero_mask(unsigned long bits)
{
	unsigned long leading_zero_bits;
	long trailing_zero_bit_mask;

	asm("addi	%1,%2,-1\n\t"
	    "andc	%1,%1,%2\n\t"
	    "popcntd	%0,%1"
		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
		: "r" (bits));

	return leading_zero_bits;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return mask >> 3;
}

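/*
 * Here mask is the bit count from create_zero_mask(), i.e. 8 * n for
 * a first zero byte at index n, so (1UL << mask) - 1 is all-ones in
 * exactly the n bytes that precede the terminator.
 */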
/* This assumes that we never ask for an all 1s bitmask */
static inline unsigned long zero_bytemask(unsigned long mask)
{
	return (1UL << mask) - 1;
}

#else	/* 32-bit case */

struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/*
 * This is largely generic for little-endian machines, but the
 * optimal byte mask counting is probably going to be something
 * that is architecture-specific. If you have a reliably fast
 * bit count instruction, that might be better than the multiply
 * and shift, for example.
 */

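/*
 * Worked arithmetic for the constant below (the four possible
 * create_zero_mask() results on 32-bit):
 *
 *	(0x0ff0001 + 0x000000) >> 23 = 1,  then & 0x000000 -> 0
 *	(0x0ff0001 + 0x0000ff) >> 23 = 1,  then & 0x0000ff -> 1
 *	(0x0ff0001 + 0x00ffff) >> 23 = 2,  then & 0x00ffff -> 2
 *	(0x0ff0001 + 0xffffff) >> 23 = 3,  then & 0xffffff -> 3
 */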
/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
static inline long count_masked_bytes(long mask)
{
	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
	long a = (0x0ff0001+mask) >> 23;
	/* Fix the 1 for 00 case */
	return a & mask;
}

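/*
 * As in the 64-bit version, (bits - 1) & ~bits keeps only the bits
 * below the first 0x80 marker; shifting right by 7 turns that into
 * one of the byte masks tabulated above (0, 0xff, 0xffff, 0xffffff).
 */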
static inline unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}

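/*
 * The classic little-endian test: (a - one_bits) borrows out of every
 * zero byte, & ~a rejects bytes whose own top bit was already set, and
 * & high_bits keeps one 0x80 marker per zero byte. Bytes above the
 * first zero may carry stray markers, which is harmless here because
 * the mask is consumed from the least significant end.
 */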
/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
{
	return bits;
}

/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

#endif /* CONFIG_64BIT */

#endif /* __BIG_ENDIAN__ */

/*
 * We use load_unaligned_zeropad() in a selftest, which builds a userspace
 * program. Some linker scripts seem to discard the .fixup section, so allow
 * the test code to use a different section name.
 */
#ifndef FIXUP_SECTION
#define FIXUP_SECTION ".fixup"
#endif

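/*
 * Load a full word from a possibly unaligned pointer. If the plain
 * load faults because the word crosses into an unmapped page, the
 * __ex_table entry redirects execution to the fixup code, which
 * re-loads from the rounded-down (aligned, hence same-page) address
 * and shifts the result so the bytes below addr fall away; the bytes
 * that would have faulted read as zero. E.g. on 64-bit LE
 * (illustrative): for an addr with (addr & 7) == 5, the fixup loads
 * from addr & ~7UL and shifts right by 5 * 8 bits.
 */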
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, offset, tmp;

	asm(
	"1:	" PPC_LL "%[ret], 0(%[addr])\n"
	"2:\n"
	".section " FIXUP_SECTION ",\"ax\"\n"
	"3:	"
#ifdef __powerpc64__
	"clrrdi		%[tmp], %[addr], 3\n\t"
	"clrlsldi	%[offset], %[addr], 61, 3\n\t"
	"ld		%[ret], 0(%[tmp])\n\t"
#ifdef __BIG_ENDIAN__
	"sld		%[ret], %[ret], %[offset]\n\t"
#else
	"srd		%[ret], %[ret], %[offset]\n\t"
#endif
#else
	"clrrwi		%[tmp], %[addr], 2\n\t"
	"clrlslwi	%[offset], %[addr], 30, 3\n\t"
	"lwz		%[ret], 0(%[tmp])\n\t"
#ifdef __BIG_ENDIAN__
	"slw		%[ret], %[ret], %[offset]\n\t"
#else
	"srw		%[ret], %[ret], %[offset]\n\t"
#endif
#endif
	"b	2b\n"
	".previous\n"
	".section __ex_table,\"a\"\n\t"
		PPC_LONG_ALIGN "\n\t"
		PPC_LONG "1b,3b\n"
	".previous"
	: [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
	: [addr] "b" (addr), "m" (*(unsigned long *)addr));

	return ret;
}

#undef FIXUP_SECTION

#endif /* _ASM_WORD_AT_A_TIME_H */