/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
					int len, __wsum sum,
					int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len);

#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len)	\
	csum_partial_copy_generic((src), (dst), (len), 0, NULL, NULL)

/*
 * Turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/*
	 * If there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}

/* Fold a 64-bit sum to 32 bits: add the two halves, keeping the carry. */
static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
#endif
}

/*
 * Computes the checksum of the TCP/UDP pseudo-header and
 * returns a 16-bit checksum, already complemented.
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
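
/*
 * Worked example (illustrative only, not part of this header): folding
 * sum = 0x87654321 with csum_fold() swaps the halves to tmp = 0x43218765,
 * adds to get 0xca86ca86, then complements and shifts to return 0x3579,
 * the final 16-bit one's complement checksum.  A hypothetical UDP caller
 * would combine the helpers above roughly as
 *
 *	__wsum csum = csum_tcpudp_nofold(saddr, daddr, len,
 *					 IPPROTO_UDP, payload_sum);
 *	__sum16 check = csum_fold(csum);
 *
 * which is exactly what csum_tcpudp_magic() does in one step; saddr,
 * daddr, len and payload_sum are assumed locals, not names defined here.
 */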

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.  ihl is the number
 * of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		/* small constant even lengths are checksummed inline */
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_CHECKSUM_H */
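
/*
 * Usage sketch (illustrative only, not part of this header): because
 * csum_partial() special-cases compile-time-constant lengths, a call
 * such as
 *
 *	sum = csum_partial(hdr, 8, sum);
 *
 * on a hypothetical 8-byte header hdr reduces to two inline csum_add()
 * operations on the 32-bit words at hdr and hdr + 4, never reaching the
 * out-of-line __csum_partial().
 */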