// SPDX-License-Identifier: GPL-2.0
/*
 * Checksum library
 *
 * Influenced by arch/arm64/lib/csum.c
 * Copyright (C) 2023-2024 Rivos Inc.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/jump_label.h>
#include <linux/kasan-checks.h>
#include <linux/kernel.h>

#include <asm/cpufeature.h>

#include <net/checksum.h>

/* Default version is sufficient for 32 bit */
#ifndef CONFIG_32BIT
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum csum)
{
	unsigned int ulen, uproto;
	unsigned long sum = (__force unsigned long)csum;

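	/*
	 * Accumulate the 32-bit pseudo-header words in a 64-bit register:
	 * eleven 32-bit terms cannot overflow 64 bits, so the carries
	 * simply collect in the upper half for the fold below.
	 */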
	sum += (__force unsigned long)saddr->s6_addr32[0];
	sum += (__force unsigned long)saddr->s6_addr32[1];
	sum += (__force unsigned long)saddr->s6_addr32[2];
	sum += (__force unsigned long)saddr->s6_addr32[3];

	sum += (__force unsigned long)daddr->s6_addr32[0];
	sum += (__force unsigned long)daddr->s6_addr32[1];
	sum += (__force unsigned long)daddr->s6_addr32[2];
	sum += (__force unsigned long)daddr->s6_addr32[3];

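	/*
	 * The address words above are already big-endian; convert len and
	 * proto to network byte order as well so every pseudo-header field
	 * is summed in the same byte order.
	 */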
	ulen = (__force unsigned int)htonl((unsigned int)len);
	sum += ulen;

	uproto = (__force unsigned int)htonl(proto);
	sum += uproto;

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
		unsigned long fold_temp;

		/*
		 * Zbb is likely available when the kernel is compiled with Zbb
		 * support, so nop when Zbb is available and jump when Zbb is
		 * not available.
		 */
		asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
				  :
				  :
				  :
				  : no_zbb);
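		/*
		 * rori/add/srli fold the 64-bit sum to 32 bits, then
		 * not/roriw/subw compute csum_fold() inline:
		 * ~sum - ror32(sum, 16) == ~(sum + ror32(sum, 16)) by the
		 * two's-complement identity ~a - b == ~(a + b).
		 */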
		asm(".option push					\n\
		.option arch,+zbb					\n\
			rori	%[fold_temp], %[sum], 32		\n\
			add	%[sum], %[fold_temp], %[sum]		\n\
			srli	%[sum], %[sum], 32			\n\
			not	%[fold_temp], %[sum]			\n\
			roriw	%[sum], %[sum], 16			\n\
			subw	%[sum], %[fold_temp], %[sum]		\n\
		.option pop"
		: [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp));
		return (__force __sum16)(sum >> 16);
	}
no_zbb:
	sum += ror64(sum, 32);
	sum >>= 32;
	return csum_fold((__force __wsum)sum);
}
EXPORT_SYMBOL(csum_ipv6_magic);
#endif /* !CONFIG_32BIT */
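
/*
 * Illustration of the fold above, for reference (not functional code):
 * with a folded 32-bit sum of 0x0001abcd, sum + ror32(sum, 16) =
 * 0x0001abcd + 0xabcd0001 = 0xabceabce, and the complemented upper
 * half, 0x5431, is the final 16-bit checksum. The Zbb sequence computes
 * the same value as (~0x0001abcd - 0xabcd0001) >> 16.
 */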

#ifdef CONFIG_32BIT
#define OFFSET_MASK 3
#elif defined(CONFIG_64BIT)
#define OFFSET_MASK 7
#endif
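
/*
 * OFFSET_MASK selects the low address bits that indicate misalignment
 * with respect to the native access width: 4 bytes on RV32, 8 on RV64.
 */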

static inline __no_sanitize_address unsigned long
do_csum_common(const unsigned long *ptr, const unsigned long *end,
	       unsigned long data)
{
	unsigned int shift;
	unsigned long csum = 0, carry = 0;

	/*
	 * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be
	 * faster than doing 32-bit reads on architectures that support larger
	 * reads.
	 */
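	/*
	 * "csum < data" is 1 exactly when the addition wrapped, so carries
	 * are collected separately and folded back into the
	 * one's-complement sum after the loop.
	 */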
	while (ptr < end) {
		csum += data;
		carry += csum < data;
		data = *(ptr++);
	}

	/*
	 * The loop reads whole words, so the final word may extend past
	 * end; mask off the bytes that were over-read.
	 */
	shift = ((long)ptr - (long)end) * 8;
#ifdef __LITTLE_ENDIAN
	data = (data << shift) >> shift;
#else
	data = (data >> shift) << shift;
#endif
	csum += data;
	carry += csum < data;
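	/*
	 * Fold the accumulated carries back in; this addition can itself
	 * carry, hence the second conditional increment.
	 */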
	csum += carry;
	csum += csum < carry;

	return csum;
}

/*
 * The algorithm accounts for buff being misaligned: both the head and
 * the tail of the buffer may be over-read, but the bytes that fall
 * outside [buff, buff + len) are masked off and never used.
 */
static inline __no_sanitize_address unsigned int
do_csum_with_alignment(const unsigned char *buff, int len)
{
	unsigned int offset, shift;
	unsigned long csum, data;
	const unsigned long *ptr, *end;

	/*
	 * Align address to closest word (double word on rv64) that comes before
	 * buff. This should always be in the same page and cache line.
	 * Directly call KASAN with the alignment we will be using.
	 */
	offset = (unsigned long)buff & OFFSET_MASK;
	kasan_check_read(buff, len);
	ptr = (const unsigned long *)(buff - offset);

	/*
	 * Mask off the bytes before buff that the aligned read pulled in:
	 * the least significant bytes of the word on little-endian, the
	 * most significant on big-endian.
	 */
	shift = offset * 8;
	data = *(ptr++);
#ifdef __LITTLE_ENDIAN
	data = (data >> shift) << shift;
#else
	data = (data << shift) >> shift;
#endif
	end = (const unsigned long *)(buff + len);
	csum = do_csum_common(ptr, end, data);

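	/*
	 * asm_goto_output() ties an output operand ("+r") to an asm goto,
	 * which needs compiler support; without it, fall through to the
	 * plain C fold below.
	 */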
#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
		unsigned long fold_temp;

		/*
		 * Zbb is likely available when the kernel is compiled with Zbb
		 * support, so nop when Zbb is available and jump when Zbb is
		 * not available.
		 */
		asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
				  :
				  :
				  :
				  : no_zbb);

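		/*
		 * Fold csum to 16 bits; if buff started on an odd address,
		 * every byte was accumulated at the opposite even/odd
		 * position, so rev8 byte-swaps the result before the final
		 * halfword is extracted.
		 */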
#ifdef CONFIG_32BIT
		asm_goto_output(".option push			\n\
		.option arch,+zbb				\n\
			rori	%[fold_temp], %[csum], 16	\n\
			andi	%[offset], %[offset], 1		\n\
			add	%[csum], %[fold_temp], %[csum]	\n\
			beq	%[offset], zero, %l[end]	\n\
			rev8	%[csum], %[csum]		\n\
		.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
			: [offset] "r" (offset)
			:
			: end);

		return (unsigned short)csum;
#else /* !CONFIG_32BIT */
		asm_goto_output(".option push			\n\
		.option arch,+zbb				\n\
			rori	%[fold_temp], %[csum], 32	\n\
			add	%[csum], %[fold_temp], %[csum]	\n\
			srli	%[csum], %[csum], 32		\n\
			roriw	%[fold_temp], %[csum], 16	\n\
			addw	%[csum], %[fold_temp], %[csum]	\n\
			andi	%[offset], %[offset], 1		\n\
			beq	%[offset], zero, %l[end]	\n\
			rev8	%[csum], %[csum]		\n\
		.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
			: [offset] "r" (offset)
			:
			: end);

		return (csum << 16) >> 48;
#endif /* !CONFIG_32BIT */
end:
		return csum >> 16;
	}
no_zbb:
#endif /* CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT */
#ifndef CONFIG_32BIT
	csum += ror64(csum, 32);
	csum >>= 32;
#endif
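	/*
	 * Fold to 16 bits in bits 31:16; an odd starting offset means
	 * every byte was summed at the opposite byte position, which the
	 * byte swap undoes.
	 */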
	csum = (u32)csum + ror32((u32)csum, 16);
	if (offset & 1)
		return (u16)swab32(csum);
	return csum >> 16;
}

/*
 * Does not perform alignment; should only be used if the machine has
 * fast misaligned accesses, or when buff is known to be aligned.
 */
static inline __no_sanitize_address unsigned int
do_csum_no_alignment(const unsigned char *buff, int len)
{
	unsigned long csum, data;
	const unsigned long *ptr, *end;

	ptr = (const unsigned long *)(buff);
	data = *(ptr++);

	kasan_check_read(buff, len);

	end = (const unsigned long *)(buff + len);
	csum = do_csum_common(ptr, end, data);

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
		unsigned long fold_temp;

		/*
		 * Zbb is likely available when the kernel is compiled with Zbb
		 * support, so nop when Zbb is available and jump when Zbb is
		 * not available.
		 */
		asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
				  :
				  :
				  :
				  : no_zbb);

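		/*
		 * Fold the running sum to 16 bits using Zbb rotates (64 ->
		 * 32 -> 16 on RV64, 32 -> 16 on RV32); the result lands in
		 * bits 31:16 and the final shift extracts it.
		 */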
#ifdef CONFIG_32BIT
		asm (".option push				\n\
		.option arch,+zbb				\n\
			rori	%[fold_temp], %[csum], 16	\n\
			add	%[csum], %[fold_temp], %[csum]	\n\
		.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
			:
			: );

#else /* !CONFIG_32BIT */
		asm (".option push				\n\
		.option arch,+zbb				\n\
			rori	%[fold_temp], %[csum], 32	\n\
			add	%[csum], %[fold_temp], %[csum]	\n\
			srli	%[csum], %[csum], 32		\n\
			roriw	%[fold_temp], %[csum], 16	\n\
			addw	%[csum], %[fold_temp], %[csum]	\n\
		.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
			:
			: );
#endif /* !CONFIG_32BIT */
		return csum >> 16;
	}
no_zbb:
#ifndef CONFIG_32BIT
	csum += ror64(csum, 32);
	csum >>= 32;
#endif
	csum = (u32)csum + ror32((u32)csum, 16);
	return csum >> 16;
}

/*
 * Perform a checksum on an arbitrary memory address.
 * Will do a light-weight address alignment if buff is misaligned, unless
 * the CPU supports fast misaligned accesses.
 */
unsigned int do_csum(const unsigned char *buff, int len)
{
	if (unlikely(len <= 0))
		return 0;

	/*
	 * Significant performance gains can be seen by not doing alignment
	 * on machines with fast misaligned accesses.
	 *
	 * There is some duplicate code between the "with_alignment" and
	 * "no_alignment" implementations, but the overlap is too awkward to be
	 * able to fit in one function without introducing multiple static
	 * branches. The largest chunk of overlap was delegated into the
	 * do_csum_common function.
	 */
	if (has_fast_unaligned_accesses() || (((unsigned long)buff & OFFSET_MASK) == 0))
		return do_csum_no_alignment(buff, len);

	return do_csum_with_alignment(buff, len);
}
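
/*
 * For reference, a sketch of how do_csum() is consumed, assuming the
 * generic wrapper in lib/checksum.c (not part of this file):
 *
 *	__wsum csum_partial(const void *buff, int len, __wsum wsum)
 *	{
 *		unsigned int sum = (__force unsigned int)wsum;
 *		unsigned int result = do_csum(buff, len);
 *
 *		result += sum;
 *		if (result < sum)
 *			result += 1;
 *		return (__force __wsum)result;
 *	}
 */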