// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

/* Transfer the timestamp request from the original GSO skb to the
 * segment that covers the tskey sequence number.
 */
static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

/* Rewrite one address/port pair of a fraglist segment and update the
 * IPv4 and TCP checksums incrementally.
 */
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

/* Make all segments of the list carry the addresses and ports of the
 * head segment; skip the walk entirely when the second segment already
 * matches the head.
 */
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		/* Segment as a list of skbs only if the head payload is
		 * exactly one MSS.
		 */
		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
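		 * This path is taken e.g. when a fraglist skb fell through
		 * the branch above and had ip_summed reset to CHECKSUM_NONE.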
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	/* ~old length: combined below with each segment's new length to
	 * form the ones'-complement checksum delta.
	 */
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	/* Fix up all segments but the last: clear FIN/PSH and apply the
	 * precomputed checksum; the sequence number advances by mss per
	 * segment.
	 */
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
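		 * The segments' summed truesize can end up smaller than
		 * the truesize of the original gso_skb.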
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

/* Find a held packet of the same flow; the source and dest ports are
 * compared as a single 32-bit word, and same_flow is cleared on a
 * mismatch.
 */
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

/* Make sure the full TCP header, including options, is available and
 * advance the GRO offset past it.
 */
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
			       ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior
	 * packet mss. If it is a single frame, do not aggregate it if its
	 * length is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss.
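	 * Such a short segment typically ends a burst, so there is
	 * nothing to gain from holding it back.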
	 */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

/* Decide whether the flow should use fraglist GRO: it is used only when
 * no established local socket exists, i.e. the packet is likely to be
 * forwarded.
 */
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}