/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 ARM Ltd.
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CHECKSUM_H
#define __ASM_CHECKSUM_H

#include <linux/bitops.h>
#include <linux/in6.h>

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

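/*
 * A minimal usage sketch (illustrative only, not kernel code): a
 * transport protocol folds the IPv6 pseudo-header into a payload
 * checksum, e.g. when filling in a UDP checksum. 'hdr', 'payload'
 * and 'len' are hypothetical:
 *
 *	__wsum partial = csum_partial(payload, len, 0);
 *	__sum16 check = csum_ipv6_magic(&hdr->saddr, &hdr->daddr,
 *					len, IPPROTO_UDP, partial);
 */
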
/*
 * Turn a 32-bit partial checksum (e.g. from csum_partial) into a
 * 16-bit one's complement checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	u32 tmp = (__force u32)sum;

	/*
	 * Add sum to a copy of itself with the two 16-bit halves
	 * swapped. Any carry from adding the two halves propagates
	 * from the lower half into the upper half, leaving the
	 * correctly folded sum in the upper half.
	 */
	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
#define csum_fold csum_fold

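/*
 * Worked example of the fold above (illustration only). For
 * sum = 0x8001ffff:
 *
 *	rol32(sum, 16)        = 0xffff8001
 *	sum + rol32(sum, 16)  = 0x80018000  (carry flows into the top half)
 *	~result >> 16         = 0x7ffe
 *
 * which matches folding by hand: 0x8001 + 0xffff = 0x18000, wrap the
 * carry to get 0x8001, and complement: ~0x8001 = 0x7ffe.
 */
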
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries. ihl is the
 * number of 32-bit words in the header and is always >= 5.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	u64 sum;
	__uint128_t tmp;
	int n = ihl; /* we want it signed */

	/* grab the first 16 bytes (4 words) in one 128-bit load */
	tmp = *(const __uint128_t *)iph;
	iph += 16;
	n -= 4;
	/* fold the two 64-bit halves; the carry lands in the upper half */
	tmp += ((tmp >> 64) | (tmp << 64));
	sum = tmp >> 64;
	/* accumulate the remaining words (ihl >= 5, so n >= 1 here) */
	do {
		sum += *(const u32 *)iph;
		iph += 4;
	} while (--n > 0);

	/* fold 64 bits down to 32, then csum_fold() reduces to 16 */
	sum += ror64(sum, 32);
	return csum_fold((__force __wsum)(sum >> 32));
}
#define ip_fast_csum ip_fast_csum

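/*
 * A minimal usage sketch (illustrative; 'skb' is hypothetical):
 * validating the header of a received IPv4 packet. A well-formed
 * header, checksum field included, folds to zero:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum((const void *)iph, iph->ihl))
 *		goto drop;	// non-zero result: corrupt header
 */
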
/*
 * do_csum() has an arch-optimized implementation; defining the name
 * here makes lib/checksum.c skip its generic fallback.
 */
extern unsigned int do_csum(const unsigned char *buff, int len);
#define do_csum do_csum

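/*
 * For reference, this is roughly how the generic code builds on the
 * override (paraphrased from lib/checksum.c): csum_partial() sits on
 * top of do_csum(), so one optimized routine speeds up every generic
 * entry point.
 *
 *	__wsum csum_partial(const void *buff, int len, __wsum wsum)
 *	{
 *		unsigned int sum = (__force unsigned int)wsum;
 *		unsigned int result = do_csum(buff, len);
 *
 *		// add in the starting value, with end-around carry
 *		result += sum;
 *		if (sum > result)
 *			result += 1;
 *		return (__force __wsum)result;
 *	}
 */
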
#include <asm-generic/checksum.h>

#endif	/* __ASM_CHECKSUM_H */