#ifndef _ASM_X86_CHECKSUM_32_H
#define _ASM_X86_CHECKSUM_32_H

#include <linux/in6.h>

#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
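
/*
 * Illustrative sketch (not part of this header): one csum_partial()
 * result can be fed into the next call to checksum a buffer in pieces;
 * frag1/frag2 and their lengths are hypothetical names.
 *
 *	__wsum sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);	// len1 must be even
 *	__sum16 csum = csum_fold(sum);		// final 16-bit checksum
 */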

/*
 * The same as csum_partial, but copies from src while checksumming,
 * and handles user-space pointer exceptions correctly, when needed.
 *
 * Here it is even more important to align src and dst on a 32-bit (or
 * even better, a 64-bit) boundary.
 */

asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
					    int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);

/*
 *	Note: when you get a NULL pointer exception here this means someone
 *	passed in an incorrect kernel address to one of these functions.
 *
 *	If you use these functions directly please don't forget the
 *	access_ok().
 */
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					       int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static inline __wsum csum_partial_copy_from_user(const void __user *src,
						 void *dst,
						 int len, __wsum sum,
						 int *err_ptr)
{
	might_sleep();
	return csum_partial_copy_generic((__force void *)src, dst,
					 len, sum, err_ptr, NULL);
}
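
/*
 * Illustrative sketch (not part of this header): this function does not
 * validate the user pointer itself, so a caller is expected to do the
 * access_ok() check and to inspect err for faults; usrc/kbuf are
 * hypothetical names.
 *
 *	int err = 0;
 *	if (!access_ok(VERIFY_READ, usrc, len))
 *		return -EFAULT;
 *	sum = csum_partial_copy_from_user(usrc, kbuf, len, sum, &err);
 *	if (err)
 *		return -EFAULT;
 */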

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which are always checksummed on 4-octet boundaries.
 *
 *	By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 *	Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	asm volatile("movl (%1), %0	;\n"
		     "subl $4, %2	;\n"
		     "jbe 2f		;\n"
		     "addl 4(%1), %0	;\n"
		     "adcl 8(%1), %0	;\n"
		     "adcl 12(%1), %0	;\n"
		     "1:	adcl 16(%1), %0	;\n"
		     "lea 4(%1), %1	;\n"
		     "decl %2	;\n"
		     "jne 1b		;\n"
		     "adcl $0, %0	;\n"
		     "movl %0, %2	;\n"
		     "shrl $16, %0	;\n"
		     "addw %w2, %w0	;\n"
		     "adcl $0, %0	;\n"
		     "notl %0	;\n"
		     "2:		;\n"
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
		     : "=r" (sum), "=r" (iph), "=r" (ihl)
		     : "1" (iph), "2" (ihl)
		     : "memory");
	return (__force __sum16)sum;
}
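
/*
 * Note on the loop above: ihl counts 32-bit words, so a minimal 20-byte
 * IPv4 header has ihl == 5.  The code sums four words up front and then
 * runs the "1:" loop once per remaining word (exactly once for ihl == 5).
 * It assumes a valid header, i.e. ihl >= 5; callers are expected to have
 * checked that before calling in.
 */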

/*
 *	Fold a partial checksum
 */

static inline __sum16 csum_fold(__wsum sum)
{
	asm("addl %1, %0		;\n"
	    "adcl $0xffff, %0	;\n"
	    : "=r" (sum)
	    : "r" ((__force u32)sum << 16),
	      "0" ((__force u32)sum & 0xffff0000));
	return (__force __sum16)(~(__force u32)sum >> 16);
}
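
/*
 * Worked example of the fold: for sum == 0x00012345 the asm computes
 * 0x00010000 + 0x23450000 = 0x23460000, then adcl $0xffff leaves
 * 0x2346ffff; complementing and shifting gives (~0x2346ffff) >> 16
 * == 0xdcb9, i.e. the ones' complement of 0x0001 + 0x2345.  The
 * end-around carry of ones' complement addition comes from the adcl
 * picking up the carry flag left by the addl.
 */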

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	asm("addl %1, %0	;\n"
	    "adcl %2, %0	;\n"
	    "adcl %3, %0	;\n"
	    "adcl $0, %0	;\n"
	    : "=r" (sum)
	    : "g" (daddr), "g" (saddr),
	      "g" ((len + proto) << 8), "0" (sum));
	return sum;
}
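
/*
 * The "(len + proto) << 8" term looks odd but is correct: under the
 * 16-bit fold that csum_fold() performs later, a contribution of
 * X << 16 is equivalent to X, so len << 8 folds to the byte-swapped
 * (network order) len, and proto << 8 puts the protocol byte where a
 * little-endian load of the pseudo-header's zero/proto bytes would put
 * it.  E.g. len == 0x0102, proto == 6 gives 0x00010800, which folds to
 * 0x0801 == htons(0x0102) + 0x0600.
 */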

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
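
/*
 * Illustrative sketch (not part of this header): the usual way to
 * checksum an outgoing TCP segment combines csum_partial() over the
 * TCP header plus payload with the pseudo-header; th and len are
 * hypothetical names for the header pointer and segment length.
 *
 *	th->check = 0;
 *	th->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
 *				      csum_partial(th, len, 0));
 */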

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	asm("addl 0(%1), %0	;\n"
	    "adcl 4(%1), %0	;\n"
	    "adcl 8(%1), %0	;\n"
	    "adcl 12(%1), %0	;\n"
	    "adcl 0(%2), %0	;\n"
	    "adcl 4(%2), %0	;\n"
	    "adcl 8(%2), %0	;\n"
	    "adcl 12(%2), %0	;\n"
	    "adcl %3, %0	;\n"
	    "adcl %4, %0	;\n"
	    "adcl $0, %0	;\n"
	    : "=&r" (sum)
	    : "r" (saddr), "r" (daddr),
	      "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));

	return csum_fold(sum);
}
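
/*
 * The asm above accumulates the RFC 2460 pseudo-header: all eight
 * 32-bit words of the source and destination addresses, plus the
 * upper-layer length and the next-header value, each passed through
 * htonl() so they enter the sum in the same byte order as the
 * in-memory address words.
 */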

/*
 *	Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
					   void __user *dst,
					   int len, __wsum sum,
					   int *err_ptr)
{
	might_sleep();
	if (access_ok(VERIFY_WRITE, dst, len))
		return csum_partial_copy_generic(src, (__force void *)dst,
						 len, sum, NULL, err_ptr);

	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}
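
/*
 * Illustrative sketch (not part of this header): callers should test
 * err rather than the returned sum to detect a fault, since any __wsum
 * value, including the -1 returned above, can also occur as a valid
 * intermediate checksum.
 *
 *	int err = 0;
 *	sum = csum_and_copy_to_user(kbuf, ubuf, len, sum, &err);
 *	if (err)
 *		return -EFAULT;
 */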

#endif /* _ASM_X86_CHECKSUM_32_H */