/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/checksum.h>
#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
#include <linux/uaccess.h>
#endif

#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif
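
/*
 * Note the ~0U initial sum above: in ones' complement arithmetic
 * 0xffffffff is an alternate encoding of zero, so seeding with it keeps
 * a successful result from ever being 0 and lets callers treat 0 as the
 * fault indicator. A minimal caller sketch (usrc, kdst and len are
 * placeholders):
 *
 *	__wsum sum = csum_and_copy_from_user(usrc, kdst, len);
 *	if (!sum)
 *		return -EFAULT;
 */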

#ifndef HAVE_CSUM_COPY_USER
static __always_inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

#ifndef _HAVE_ARCH_CSUM_AND_COPY
static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

#ifndef HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;

	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif
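
/*
 * Worked example of the end-around carry in csum_add(): adding
 * 0xffff0000 and 0x0001ffff wraps to res = 0x0000ffff and drops a
 * carry out of bit 31; "res < addend" detects that lost carry, and
 * adding it back yields 0x00010000, as ones' complement addition
 * requires.
 */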

static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}

static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

#ifndef HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		return (__force __wsum)ror32((__force u32)sum, 8);
	return sum;
}
#endif
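
/*
 * The Internet checksum is a sum of 16-bit words, so a block's
 * contribution depends on whether it starts at an even or an odd byte
 * offset: at an odd offset every byte lands in the opposite lane of
 * its 16-bit word. Rotating the 32-bit sum by 8 bits swaps those byte
 * lanes, which is why csum_shift() only acts when offset is odd.
 */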

static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	return csum_add(csum, csum_shift(csum2, offset));
}

static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

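/*
 * Sketch of checksumming a buffer in two chunks with csum_block_add()
 * (illustrative only; buf and len are placeholders). Splitting at an
 * odd offset exercises the csum_shift() lane swap, and the combined
 * sum folds to the same 16-bit value as a single pass:
 *
 *	__wsum lo = csum_partial(buf, 13, 0);
 *	__wsum hi = csum_partial(buf + 13, len - 13, 0);
 *	__wsum whole = csum_block_add(lo, hi, 13);
 *
 * Here csum_fold(whole) equals csum_fold(csum_partial(buf, len, 0)).
 */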

static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}

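/*
 * RFC 768 reserves an all-zero UDP checksum to mean "no checksum was
 * computed", so a computed checksum that folds to zero is transmitted
 * as its ones' complement alternate encoding 0xffff instead.
 */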
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}
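
/*
 * Typical csum_replace4() use, e.g. rewriting an IPv4 source address
 * in place (an illustrative sketch; iph and new_saddr are
 * placeholders):
 *
 *	csum_replace4(&iph->check, iph->saddr, new_saddr);
 *	iph->saddr = new_saddr;
 */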

/* Implements RFC 1624 (Incremental Internet Checksum), whose section 3
 * (Discussion) states:
 *     HC' = ~(~HC + ~m + m')
 *  m  : old value of a 16bit field
 *  m' : new value of a 16bit field
 */
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
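
/*
 * Example of the RFC 1624 update above (illustrative; iph and
 * new_tot_len are placeholders): fixing the IPv4 header checksum
 * after changing the 16-bit total-length field.
 *
 *	csum_replace2(&iph->check, iph->tot_len, new_tot_len);
 *	iph->tot_len = new_tot_len;
 */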

static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

static inline unsigned short csum_from32to16(unsigned int sum)
{
	sum += (sum >> 16) | (sum << 16);
	return (unsigned short)(sum >> 16);
}
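
/*
 * Folding example for csum_from32to16(): with sum = 0xffff0001 the
 * swapped word is 0x0001ffff; their 32-bit sum wraps to 0x00010000,
 * and the returned high half 0x0001 matches the end-around-carry fold
 * of 0xffff + 0x0001.
 */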

struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr, bool ipv6);

static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
			      __be16 from, __be16 to, bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}
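
/*
 * Sketch of a transport-header rewrite (illustrative; th and new_port
 * are placeholders). pseudohdr is false here because ports are not
 * part of the pseudo-header; an address rewrite would pass true so
 * that skb->csum is kept consistent as well:
 *
 *	inet_proto_csum_replace2(&th->check, skb, th->dest, new_port, false);
 *	th->dest = new_port;
 */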

static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}

static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
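
/*
 * These two implement remote checksum offload: remcsum_adjust()
 * derives the checksum at ptr + offset from one covering the data
 * past ptr + start, and returns the resulting delta so callers such
 * as skb_remcsum_process() can fold it back into skb->csum;
 * remcsum_unadjust() restores the original checksum field when the
 * adjustment has to be undone, as in the GRO cleanup path.
 */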

static __always_inline __wsum wsum_negate(__wsum val)
{
	return (__force __wsum)-((__force u32)val);
}
#endif /* _CHECKSUM_H */