// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

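/* Walk the segment list produced by segmentation and transfer the software
 * timestamp request to the segment that covers sequence number ts_seq, so
 * the timestamp fires once, for the right byte of the original GSO skb.
 */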
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

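/* Rewrite one address/port pair on a segment and patch the TCP and IP
 * checksums incrementally; a no-op when nothing changed.
 */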
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

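/* The head skb of a fraglist may have been modified (e.g. by NAT) after
 * GRO. Propagate the head's addresses and ports to the trailing segments
 * and fix up their checksums accordingly.
 */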
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

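	/* source and dest are adjacent __be16 fields in struct tcphdr, so
	 * a single 32-bit xor compares both ports at once.
	 */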
	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

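/* Split a fraglist GSO skb back into its original segments, then propagate
 * any header changes made to the head skb onto the trailing segments.
 */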
static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

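/* IPv4 entry point for TCP GSO: validate the skb, handle the fraglist
 * case, then hand off to the generic tcp_gso_segment().
 */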
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; we usually expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

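/* Generic TCP GSO worker: split one large skb into mss-sized segments and
 * fix up sequence numbers, flags and checksums on each of them.
 */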
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

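	/* ~skb->len is the one's-complement of the old TCP length; adding
	 * the new per-segment length to it later yields the checksum delta.
	 */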
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

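	/* All but the last segment carry thlen + mss bytes, so they can
	 * share one checksum precomputed for the new pseudo-header length.
	 */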
	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

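	/* The last segment may be shorter than mss: recompute its checksum
	 * from the amount of data it actually carries.
	 */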
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

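/* Look up a held GRO packet whose TCP ports match th. Held packets on the
 * same network flow but with different ports get same_flow cleared.
 */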
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

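/* Make the full TCP header, including options, available in the GRO header
 * buffer and advance the GRO offset past it.
 */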
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

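/* Core TCP GRO matching: decide whether skb can be merged into a held
 * packet p, or whether one of them must be flushed to the stack.
 */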
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
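	/* Any difference in the TCP options also forces a flush; compare
	 * the option area word by word.
	 */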
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

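	/* Fraglist GRO chains the skbs without merging their payloads, so
	 * flags and checksum state must match exactly; a chain is capped
	 * at 64 segments.
	 */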
	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

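/* Finish a merged GRO packet: point the checksum fields at the TCP header
 * and fill in the gso metadata before handing it to the stack.
 */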
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

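/* Choose fraglist GRO for flows that have no local established socket
 * (i.e. packets likely being forwarded), so they can later be segmented
 * back into the original frames unchanged.
 */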
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

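	/* Prime th->check with the pseudo-header checksum; the remainder
	 * is filled in later by checksum offload or GSO (the packet is
	 * left as CHECKSUM_PARTIAL by tcp_gro_complete()).
	 */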
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

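/* Register the TCP GSO/GRO callbacks with the IPv4 protocol offload table. */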
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	= tcp4_gso_segment,
			.gro_receive	= tcp4_gro_receive,
			.gro_complete	= tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}