xref: /linux/arch/mips/include/asm/checksum.h (revision dc16c8a9ce980d03cfeedbc2559744140d134130)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2001 Thiemo Seufer.
 * Copyright (C) 2002 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H

#ifdef CONFIG_GENERIC_CSUM
#include <asm-generic/checksum.h>
#else

#include <linux/in6.h>

#include <linux/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
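
/*
 * Example (illustrative sketch, not part of this header): checksumming a
 * packet in two passes and folding the result; "pkt", "hdr_len" and
 * "data_len" are hypothetical caller variables.
 *
 *	__wsum sum = csum_partial(pkt, hdr_len, 0);
 *
 *	sum = csum_partial(pkt + hdr_len, data_len, sum);
 *	return csum_fold(sum);
 */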

__wsum __csum_partial_copy_kernel(const void *src, void *dst,
				  int len, __wsum sum, int *err_ptr);

__wsum __csum_partial_copy_from_user(const void *src, void *dst,
				     int len, __wsum sum, int *err_ptr);
__wsum __csum_partial_copy_to_user(const void *src, void *dst,
				   int len, __wsum sum, int *err_ptr);
/*
 * this is a version of the above that records errors it finds in *err_ptr,
 * but continues and zeros the rest of the buffer.
 */
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
				   __wsum sum, int *err_ptr)
{
	might_fault();
	if (uaccess_kernel())
		return __csum_partial_copy_kernel((__force void *)src, dst,
						  len, sum, err_ptr);
	else
		return __csum_partial_copy_from_user((__force void *)src, dst,
						     len, sum, err_ptr);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	__wsum sum = ~0U;
	int err = 0;

	if (!access_ok(src, len))
		return 0;
	sum = csum_partial_copy_from_user(src, dst, len, sum, &err);
	return err ? 0 : sum;
}
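
/*
 * Example (illustrative sketch): pulling a datagram payload in from user
 * space while checksumming it; "ubuf", "kbuf" and "len" are hypothetical.
 * Because the running sum is seeded with ~0U, a successful copy can never
 * return 0, so 0 unambiguously signals a fault or a failed access_ok().
 *
 *	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len);
 *
 *	if (!csum)
 *		return -EFAULT;
 */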

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	int err = 0;
	__wsum sum = ~0U;

	might_fault();
	if (!access_ok(dst, len))
		return 0;
	if (uaccess_kernel())
		sum = __csum_partial_copy_kernel(src,
						 (__force void *)dst,
						 len, sum, &err);
	else
		sum = __csum_partial_copy_to_user(src,
						  (__force void *)dst,
						  len, sum, &err);
	return err ? 0 : sum;
}
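
/*
 * Example (illustrative sketch): checksumming a kernel buffer while copying
 * it out to user space; "kbuf", "ubuf" and "len" are hypothetical. As with
 * csum_and_copy_from_user(), the ~0U seed makes a return value of 0 an
 * unambiguous failure indication.
 *
 *	__wsum csum = csum_and_copy_to_user(kbuf, ubuf, len);
 *
 *	if (!csum)
 *		return -EFAULT;
 */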

/*
 * the same as csum_partial, but also copies from src to dst while it
 * checksums; no access checking is done on either pointer (hence
 * "nocheck")
 */
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	return __csum_partial_copy_nocheck(src, dst, len, 0);
}
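
/*
 * Example (illustrative sketch): duplicating a buffer that is already known
 * to be valid kernel memory while computing its checksum in the same pass;
 * "src", "dst" and "len" are hypothetical.
 *
 *	__wsum sum = csum_partial_copy_nocheck(src, dst, len);
 *	__sum16 check = csum_fold(sum);
 */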

/*
 *	Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum += (sum << 16);	/* add the low half into the high half */
	csum = (__force __wsum)(sum < (__force u32)csum); /* carry out of bit 31? */
	sum >>= 16;		/* keep the folded high half */
	sum += (__force u32)csum;	/* fold the carry back in */

	return (__force __sum16)~sum;
}
#define csum_fold csum_fold
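
/*
 * Worked example (illustrative arithmetic only), folding 0x8001fffe:
 *
 *	sum               = 0x8001fffe
 *	sum + (sum << 16) = 0x7ffffffe, carry = 1
 *	sum >> 16         = 0x7fff
 *	0x7fff + 1        = 0x8000
 *	~0x8000           = 0x7fff (as a 16-bit __sum16)
 */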

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which are always checksummed on 4-octet boundaries.
 *
 *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *	Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;	/* ihl is in 32-bit words, >= 5 */
	unsigned int csum;
	int carry;

	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
#define ip_fast_csum ip_fast_csum
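
/*
 * Example (illustrative sketch): validating a received IPv4 header, where
 * "iph" is a hypothetical struct iphdr pointer and "csum_error" is a
 * hypothetical label; iph->ihl is the header length in 32-bit words. A
 * header with a correct checksum sums to zero.
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 */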

/*
 * Accumulate the IPv4 pseudo-header (source address, destination address,
 * length and protocol) into a running checksum without folding it.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	__asm__(
	"	.set	push		# csum_tcpudp_nofold\n"
	"	.set	noat		\n"
#ifdef CONFIG_32BIT
	"	addu	%0, %2		\n"
	"	sltu	$1, %0, %2	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %3		\n"
	"	sltu	$1, %0, %3	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %4		\n"
	"	sltu	$1, %0, %4	\n"
	"	addu	%0, $1		\n"
#endif
#ifdef CONFIG_64BIT
	"	daddu	%0, %2		\n"
	"	daddu	%0, %3		\n"
	"	daddu	%0, %4		\n"
	"	dsll32	$1, %0, 0	\n"
	"	daddu	%0, $1		\n"
	"	sltu	$1, %0, $1	\n"
	"	dsra32	%0, %0, 0	\n"
	"	addu	%0, $1		\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
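
/*
 * Example (illustrative sketch): computing an outgoing UDP checksum by
 * combining the payload checksum with the pseudo-header; "iph", "uh" and
 * "ulen" are hypothetical. csum_tcpudp_magic() comes from the
 * asm-generic/checksum.h included below and is simply
 * csum_fold(csum_tcpudp_nofold(...)); a real UDP transmit path would also
 * map a resulting 0 to CSUM_MANGLED_0.
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *
 *	uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ulen,
 *				      IPPROTO_UDP, csum);
 */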

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
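
/*
 * Example (illustrative sketch): filling in an ICMP echo reply checksum,
 * where "icmph" and "icmp_len" are hypothetical and cover the ICMP header
 * plus payload; the checksum field must be zeroed before summing.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, icmp_len);
 */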

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	__wsum tmp;

	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=&r" (sum), "=&r" (tmp)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}
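
/*
 * Example (illustrative sketch): computing a UDPv6 checksum, where "ip6h",
 * "uh" and "ulen" are hypothetical pointers/lengths for the IPv6 header and
 * the UDP header plus payload.
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);
 *
 *	uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ulen,
 *				    IPPROTO_UDP, csum);
 */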

#include <asm-generic/checksum.h>
#endif /* CONFIG_GENERIC_CSUM */

#endif /* _ASM_CHECKSUM_H */