xref: /linux/net/ipv4/tcp_offload.c (revision 9c93c0b44be36fd5267fb79ae33453f989fbe909)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

/* Mark the segment that contains ts_seq, so the software TX timestamp
 * is reported for the right on-wire packet.
 */
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

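/* Note on the helper below: inet_proto_csum_replace{2,4}() and
 * csum_replace4() perform RFC 1624 incremental checksum updates.
 * Replacing a field m with m' adjusts the folded checksum HC as
 *
 *	HC' = ~(~HC + ~m + m')
 *
 * so only the changed address and port words are re-summed, never the
 * payload. The "true" pseudohdr argument tells the L4 variant that the
 * replaced field is part of the TCP pseudo-header; ports are not, hence
 * "false" for them.
 */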
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	/* source and dest are adjacent __be16 fields, so a single 32-bit
	 * load compares both ports at once.
	 */
	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
		return __tcp4_gso_segment_list(skb, features);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; we usually expect
		 * the stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

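	/* A sketch of the arithmetic above: oldlen holds ~skb->len taken
	 * before the header pull, i.e. the one's complement of the old
	 * TCP length (header plus full payload). Each new segment will
	 * carry thlen + mss bytes, so adding
	 *
	 *	delta = htonl(~old_tcp_len + new_tcp_len)
	 *
	 * to the unfolded checksum replaces the old length with the new
	 * one in the pseudo-header sum, without re-summing any payload.
	 */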
	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	/* All segments but the last carry exactly mss bytes of payload,
	 * so a single adjusted checksum serves them all.
	 */
	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last frag is
	 * freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

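	/* The last segment keeps whatever payload remains, so its length
	 * is measured directly: linear bytes from the transport header
	 * to the tail, plus any paged bytes counted in data_len.
	 */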
	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		/* Compare both ports with a single 32-bit load. */
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		/* TCP options extend past the fast-path headroom; take
		 * the slow path to bring the full header in.
		 */
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	/* Include the IP ID check below from the innermost IP hdr */
	th2 = tcp_hdr(p);
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

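	/* Note on the pattern above: flush accumulates XOR differences,
	 * so any mismatched bit (flags outside CWR/FIN/PSH, ack_seq, or
	 * any TCP option word) leaves flush nonzero and the frames are
	 * not merged.
	 */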
	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or if we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches the
	 * prior packet mss. If it is a single frame, do not aggregate
	 * it if its length is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
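	/* No established local socket means the flow is most likely
	 * being forwarded; fraglist GRO keeps the original frames intact
	 * so they can be resegmented exactly on output.
	 */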
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
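
/* inet_add_offload() publishes this net_offload in the inet_offloads[]
 * table slot for IPPROTO_TCP; inet_gso_segment() and inet_gro_receive()
 * then dispatch to the callbacks above by IP protocol number.
 */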