// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

/* Walk the segment list produced by GSO and transfer the software TX
 * timestamp request (SKBTX_SW_TSTAMP + tskey) from the original gso_skb
 * onto the one segment whose sequence range [seq, seq + mss) covers
 * ts_seq.  At most one segment is marked.
 */
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

/* Rewrite one IPv4 address + TCP port pair in a fraglist GSO segment,
 * incrementally patching both the TCP checksum (pseudo-header covers the
 * address, hence pseudohdr=true for the address replace) and the IP
 * header checksum.  No-op when nothing changed.
 */
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

/* After fraglist segmentation, make every segment carry the same IPv4
 * addresses and TCP ports as the head segment.  If the second segment
 * already matches the head (ports compared as one u32 covering
 * source+dest, plus saddr/daddr), all of them are assumed consistent
 * and the list is returned untouched.
 */
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

/* Segment an SKB_GSO_FRAGLIST skb by splitting its frag_list members
 * into standalone packets, then fix up addresses/ports/checksums.
 * Returns an ERR_PTR from skb_segment_list() on failure.
 */
static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

/* IPv4 entry point for TCP GSO: validates gso_type and header room,
 * dispatches fraglist skbs, and (re)establishes the CHECKSUM_PARTIAL
 * pseudo-header checksum before handing off to the protocol-independent
 * tcp_gso_segment().
 */
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
		return __tcp4_gso_segment_list(skb, features);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

/* Protocol-independent TCP GSO: split a super-packet into MSS-sized
 * segments, fixing up sequence numbers, FIN/PSH/CWR flags and checksums
 * on each, and preserving the tcp_wfree destructor / sk_wmem_alloc
 * accounting so TCP Small Queues keeps working.  Returns NULL when the
 * device can handle the skb as-is (GSO_ROBUST path), an ERR_PTR on
 * error, or the segment list.
 */
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	/* One's complement of the old payload length; feeds the
	 * incremental checksum delta computations below.
	 */
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	/* Checksum delta for every full-MSS segment: new tcp length
	 * (thlen + mss) minus old length (folded in via ~skb->len).
	 */
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	/* All segments except the last: clear FIN/PSH, install the
	 * precomputed checksum, advance seq, and optionally inherit the
	 * original destructor for wmem accounting.
	 */
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	/* Last segment: its length is whatever remains, so recompute the
	 * checksum delta from the actual linear + paged payload size.
	 */
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

/* Protocol-independent TCP GRO receive: find a held packet with matching
 * ports, accumulate flush conditions (flag changes, option differences,
 * sequence discontinuity, mss mismatch, ...), and merge via
 * skb_gro_receive{,_list}().  Returns the packet to flush (pp) or NULL.
 */
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		/* Compare source+dest ports as a single 32-bit word. */
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	/* TCP options must match the held packet word for word. */
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can made a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

	/* Fraglist GRO: stricter flag/checksum-state matching, capped at
	 * 64 held segments; merged via skb_gro_receive_list() instead.
	 */
	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	/* Merged: propagate FIN/PSH from the new segment to the head. */
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

/* Finalize a merged GRO packet: point csum_start/offset at the TCP
 * checksum field (CHECKSUM_PARTIAL), set gso_segs from the merge count,
 * and flag ECN if CWR was seen.
 */
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

/* IPv4 GRO receive hook: validate the TCP checksum against the IPv4
 * pseudo-header (unless already flushing), then defer to the common
 * tcp_gro_receive().
 */
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

/* IPv4 GRO complete hook: fraglist packets only get gso metadata and a
 * csum_level bump; regular packets get a fresh pseudo-header checksum
 * and the common tcp_gro_complete() treatment.
 */
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

/* Register the TCPv4 offload callbacks with the inet offload table. */
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}