1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1995, 96, 97, 98, 99, 2001 by Ralf Baechle 7 * Copyright (C) 1999 Silicon Graphics, Inc. 8 * Copyright (C) 2001 Thiemo Seufer. 9 * Copyright (C) 2002 Maciej W. Rozycki 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 */ 12 #ifndef _ASM_CHECKSUM_H 13 #define _ASM_CHECKSUM_H 14 15 #ifdef CONFIG_GENERIC_CSUM 16 #include <asm-generic/checksum.h> 17 #else 18 19 #include <linux/in6.h> 20 21 #include <linux/uaccess.h> 22 23 /* 24 * computes the checksum of a memory block at buff, length len, 25 * and adds in "sum" (32-bit) 26 * 27 * returns a 32-bit number suitable for feeding into itself 28 * or csum_tcpudp_magic 29 * 30 * this function must be called with even lengths, except 31 * for the last fragment, which may be odd 32 * 33 * it's best to have buff aligned on a 32-bit boundary 34 */ 35 __wsum csum_partial(const void *buff, int len, __wsum sum); 36 37 __wsum __csum_partial_copy_kernel(const void *src, void *dst, 38 int len, __wsum sum, int *err_ptr); 39 40 __wsum __csum_partial_copy_from_user(const void *src, void *dst, 41 int len, __wsum sum, int *err_ptr); 42 __wsum __csum_partial_copy_to_user(const void *src, void *dst, 43 int len, __wsum sum, int *err_ptr); 44 /* 45 * this is a new version of the above that records errors it finds in *errp, 46 * but continues and zeros the rest of the buffer. 
 */
static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
				   __wsum sum, int *err_ptr)
{
	/*
	 * Copying from user space may fault and sleep.  When the current
	 * address limit covers kernel space (uaccess_kernel()), src is
	 * really a kernel pointer, so use the kernel-to-kernel variant.
	 * Faults are reported through *err_ptr by the asm helpers.
	 */
	might_fault();
	if (uaccess_kernel())
		return __csum_partial_copy_kernel((__force void *)src, dst,
						  len, sum, err_ptr);
	else
		return __csum_partial_copy_from_user((__force void *)src, dst,
						     len, sum, err_ptr);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len, __wsum sum, int *err_ptr)
{
	/*
	 * Checked variant: validate the user range first, then defer to
	 * csum_partial_copy_from_user().  On an invalid range report
	 * -EFAULT (for non-zero lengths) and return the unmodified seed.
	 */
	if (access_ok(src, len))
		return csum_partial_copy_from_user(src, dst, len, sum,
						   err_ptr);
	if (len)
		*err_ptr = -EFAULT;

	return sum;
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
			     __wsum sum, int *err_ptr)
{
	/*
	 * Mirror image of csum_and_copy_from_user(): check the destination
	 * range, then pick the kernel- or user-destination helper.  On a
	 * bad range set *err_ptr and return all-ones, which is never a
	 * valid folded checksum value.
	 */
	might_fault();
	if (access_ok(dst, len)) {
		if (uaccess_kernel())
			return __csum_partial_copy_kernel(src,
							  (__force void *)dst,
							  len, sum, err_ptr);
		else
			return __csum_partial_copy_to_user(src,
							   (__force void *)dst,
							   len, sum, err_ptr);
	}
	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}

/*
 * the same as csum_partial, but copies from user space (but on MIPS
 * we have just one address space, so this is identical to the above)
 */
#define _HAVE_ARCH_CSUM_AND_COPY
__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	/* Unchecked copy+checksum with a zero seed; no fault reporting. */
	return __csum_partial_copy_nocheck(src, dst, len, 0);
}

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum += (sum << 16);	/* add low halfword into the high halfword */
	csum = (__force __wsum)(sum < (__force u32)csum); /* did that carry out? */
	sum >>= 16;		/* keep the folded (high) halfword */
	sum += (__force u32)csum; /* add the carry back in (end-around carry) */

	return (__force __sum16)~sum; /* ones' complement gives the checksum */
}
#define csum_fold csum_fold

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	/*
	 * ihl is in 32-bit words.  The first four words are summed with an
	 * unrolled carry chain; the do-while handles the rest and executes
	 * at least once, so this assumes ihl >= 5 (the minimum legal IPv4
	 * header length) — callers must validate ihl beforehand.
	 */
	const unsigned int *word = iph;
	const unsigned int *stop = word + ihl;
	unsigned int csum;
	int carry;

	csum = word[0];
	csum += word[1];
	carry = (csum < word[1]);	/* unsigned wrap => carry out */
	csum += carry;

	csum += word[2];
	carry = (csum < word[2]);
	csum += carry;

	csum += word[3];
	carry = (csum < word[3]);
	csum += carry;

	word += 4;
	do {
		csum += *word;
		carry = (csum < *word);
		csum += carry;
		word++;
	} while (word != stop);

	return csum_fold(csum);
}
#define ip_fast_csum ip_fast_csum

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	/*
	 * Sum the TCP/UDP pseudo-header fields (addresses, length,
	 * protocol) into sum without folding.  %0 starts as daddr (tied
	 * via the "0" constraint); on 32-bit each addu is followed by an
	 * explicit sltu/addu end-around-carry step, while on 64-bit the
	 * additions are done in 64 bits and the upper word is folded into
	 * the lower one at the end (dsll32/dsra32).  On little-endian
	 * (__MIPSEL__) proto+len is shifted up a byte so it lands in the
	 * same lane it would occupy in network byte order.
	 */
	__asm__(
	"	.set	push		# csum_tcpudp_nofold\n"
	"	.set	noat		\n"
#ifdef CONFIG_32BIT
	"	addu	%0, %2		\n"
	"	sltu	$1, %0, %2	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %3		\n"
	"	sltu	$1, %0, %3	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %4		\n"
	"	sltu	$1, %0, %4	\n"
	"	addu	%0, $1		\n"
#endif
#ifdef CONFIG_64BIT
	"	daddu	%0, %2		\n"
	"	daddu	%0, %3		\n"
	"	daddu	%0, %4		\n"
	"	dsll32	$1, %0, 0	\n"
	"	daddu	%0, $1		\n"
	"	sltu	$1, %0, $1	\n"
	"	dsra32	%0, %0, 0	\n"
	"	addu	%0, $1		\n"
#endif
	"	.set	pop"
	: "=r" (sum)
	: "0" ((__force unsigned long)daddr),
	  "r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
	  "r" ((proto + len) << 8),
#else
	  "r" (proto + len),
#endif
	  "r" ((__force unsigned long)sum));

	return sum;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold

/*
 *
this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	/* Checksum an arbitrary buffer and fold to a 16-bit result. */
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	__wsum tmp;

	/*
	 * IPv6 pseudo-header checksum: %0 is seeded with htonl(len) (tied
	 * via the "0" constraint), then the protocol (%5), the running
	 * checksum (%6) and the four 32-bit words of each address (loaded
	 * from %2/%3 into scratch %1) are accumulated.  Each addu is
	 * paired with an sltu into $1 to capture the carry, which is
	 * added back on the following step; the loads are interleaved
	 * with the carry adds to fill load delay slots (.set noreorder).
	 */
	__asm__(
	"	.set	push		# csum_ipv6_magic\n"
	"	.set	noreorder	\n"
	"	.set	noat		\n"
	"	addu	%0, %5		# proto (long in network byte order)\n"
	"	sltu	$1, %0, %5	\n"
	"	addu	%0, $1		\n"

	"	addu	%0, %6		# csum\n"
	"	sltu	$1, %0, %6	\n"
	"	lw	%1, 0(%2)	# four words source address\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%2)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 0(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 4(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 8(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	lw	%1, 12(%3)	\n"
	"	addu	%0, $1		\n"
	"	addu	%0, %1		\n"
	"	sltu	$1, %0, %1	\n"

	"	addu	%0, $1		# Add final carry\n"
	"	.set	pop"
	: "=&r" (sum), "=&r" (tmp)
	: "r" (saddr), "r" (daddr),
	  "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));

	return csum_fold(sum);
}

/*
 * The generic header supplies defaults for anything not overridden
 * above (the "#define csum_fold csum_fold" style markers suppress the
 * corresponding generic definitions).
 */
#include <asm-generic/checksum.h>
#endif /* CONFIG_GENERIC_CSUM */

#endif /* _ASM_CHECKSUM_H */