xref: /linux/arch/arm/include/asm/checksum.h (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  *  arch/arm/include/asm/checksum.h
3  *
4  * IP checksum routines
5  *
6  * Copyright (C) Original authors of ../asm-i386/checksum.h
7  * Copyright (C) 1996-1999 Russell King
8  */
9 #ifndef __ASM_ARM_CHECKSUM_H
10 #define __ASM_ARM_CHECKSUM_H
11 
12 #include <linux/in6.h>
13 
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
/* Implemented out of line (assembly); see the arch's lib directory. */
__wsum csum_partial(const void *buff, int len, __wsum sum);
27 
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */

/* Kernel-to-kernel copy-and-checksum: no fault handling required. */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

/*
 * Copy-and-checksum from user space; presumably a fault while reading
 * src is reported through *err_ptr — confirm against the implementation.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
41 
/*
 * 	Fold a partial checksum without adding pseudo headers
 *
 * 	Adds the 32-bit sum to itself rotated by 16 bits, so both
 * 	halfwords (plus any inter-halfword carry) accumulate in the top
 * 	16 bits; the result is then complemented and shifted down to
 * 	yield the final 16-bit ones'-complement checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"add	%0, %1, %1, ror #16	@ csum_fold"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	return (__force __sum16)(~(__force u32)sum >> 16);
}
54 
/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 *
 *	iph points at the IP header (assumed 32-bit aligned, per the
 *	csum_partial note above); ihl is the header length in 32-bit
 *	words (5 for a minimal IPv4 header).
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int tmp1;
	__wsum sum;

	/*
	 * Unrolled adds/adcs over the first words, then a loop over the
	 * remaining (ihl - 5) option words.  The tst/subne/bne sequence
	 * decrements the counter without touching the carry flag, which
	 * must survive between adcs instructions.
	 * NOTE(review): "tst %2, #15" only examines the low 4 bits of
	 * the counter, so this assumes ihl - 5 stays below 16 — fine for
	 * IPv4, where ihl is at most 15.
	 */
	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc", "memory");
	return csum_fold(sum);
}
85 
/*
 * Accumulate the TCP/UDP pseudo-header (saddr, daddr, len, proto) into
 * 'sum', returning the unfolded 32-bit result.
 *
 * proto is pre-swapped with htons().  On big-endian, len is added
 * as-is; on little-endian it is added shifted left by 8 bits, which —
 * once the end-around-carry fold in csum_fold() runs — contributes the
 * same value as htons(len) would.
 *
 * %0 is earlyclobber ("=&r") because it is written by the first adds
 * while the input operands are still live.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	__asm__(
	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\
	adcs	%0, %0, %3					\n"
#ifdef __ARMEB__
	"adcs	%0, %0, %4					\n"
#else
	"adcs	%0, %0, %4, lsl #8				\n"
#endif
	"adcs	%0, %0, %5					\n\
	adc	%0, %0, #0"
	: "=&r"(sum)
	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
	: "cc");
	return sum;
}
105 /*
106  * computes the checksum of the TCP/UDP pseudo-header
107  * returns a 16-bit checksum, already complemented
108  */
109 static inline __sum16
110 csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
111 		  unsigned short proto, __wsum sum)
112 {
113 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
114 }
115 
116 
117 /*
118  * this routine is used for miscellaneous IP-like checksums, mainly
119  * in icmp.c
120  */
121 static inline __sum16
122 ip_compute_csum(const void *buff, int len)
123 {
124 	return csum_fold(csum_partial(buff, len, 0));
125 }
126 
/* Tell the generic networking code we provide csum_ipv6_magic(). */
#define _HAVE_ARCH_IPV6_CSUM
/*
 * Out-of-line helper backing csum_ipv6_magic() below; returns the
 * unfolded 32-bit sum (the caller applies csum_fold()).
 */
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
		__be32 proto, __wsum sum);
131 
132 static inline __sum16
133 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len,
134 		unsigned short proto, __wsum sum)
135 {
136 	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
137 					   htonl(proto), sum));
138 }
139 #endif
140