// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

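/* Carry the tx timestamp request from the original GSO skb over to the
 * segment that contains the byte position recorded in tskey: walk the
 * segment list and tag the first segment for which ts_seq lies before
 * seq + mss.
 */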
static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

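/* Rewrite one address/port pair on a segment, updating the TCP checksum
 * and the IP header checksum incrementally instead of recomputing them.
 * Does nothing when the segment already carries the new values.
 */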
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

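/* After skb_segment_list(), trailing segments still carry the headers
 * they had when they were aggregated.  If the head segment's addresses
 * or ports were rewritten while held (e.g. by NAT), propagate the new
 * values to every trailing segment.  The first test compares both ports
 * at once via a single 32-bit load, since source and dest are adjacent
 * in struct tcphdr.
 */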
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

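/* Undo fraglist GRO: split the list back into its original frames and
 * fix up the per-segment checksums in case the headers were modified
 * while the packet was aggregated.
 */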
static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

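/* GSO entry point for TCPv4.  Fraglist packets whose head length still
 * matches gso_size take the cheap skb_segment_list() path; everything
 * else goes through full segmentation in tcp_gso_segment().
 */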
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);
		/* Set up the checksum pseudo header; we usually expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

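/* Split a TCP super-packet into mss-sized segments, fixing up each
 * segment's sequence number, FIN/PSH/CWR flags and checksum.  oldlen is
 * kept as ~skb->len so checksum deltas below can use ones'-complement
 * arithmetic.  Socket ownership (for TCP Small Queues) and tx timestamp
 * requests from the original skb are preserved across the split.
 */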
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	bool ecn_cwr_mask;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

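	/* The TCP checksum covers the payload length via the pseudo
	 * header.  In ones'-complement arithmetic the adjustment from
	 * the old total length to the per-segment length is simply
	 * new + ~old, and oldlen already holds ~skb->len from before
	 * the headers were pulled.
	 */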
	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	ecn_cwr_mask = !!(skb_shinfo(gso_skb)->gso_type & SKB_GSO_TCP_ACCECN);

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);

		th->cwr &= ecn_cwr_mask;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

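/* Look up a held packet belonging to the same flow as th.  Both ports
 * are compared with one 32-bit load; list entries that do not match
 * get same_flow cleared so later matchers skip them.
 */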
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

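/* Protocol-independent TCP GRO matcher.  Coalescing is refused (flush)
 * when TCP options, ack_seq or the expected next sequence number differ,
 * when CWR is set on the new segment, or when segment sizes are
 * inconsistent with the aggregate's mss.  FIN and PSH do not block
 * merging and are propagated into the held packet; URG/PSH/RST/SYN/FIN
 * on the incoming segment close the aggregate so it is flushed upward.
 */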
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;
		skb_set_network_header(skb, skb_gro_receive_network_offset(skb));

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

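/* Finish a merged GRO packet before handing it up the stack: make it
 * look like a locally built CHECKSUM_PARTIAL packet and record the
 * segment count so it can later be resegmented by GSO.  A set CWR bit
 * marks the packet SKB_GSO_TCP_ACCECN, which keeps tcp_gso_segment()
 * from clearing CWR on the trailing segments.
 */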
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ACCECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

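/* Decide whether this flow should use fraglist GRO.  Fraglist GRO is
 * only chosen when the device enables it and no established local
 * socket exists, i.e. the packet is presumably going to be forwarded
 * and is cheaper to keep as a list of original frames.
 */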
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net_rcu(skb->dev);
	sk = __inet_lookup_established(net, iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_gen_put(sk);
}

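/* GRO receive handler for TCPv4: validate the checksum against the IPv4
 * pseudo header unless a flush is already pending, pull the TCP header,
 * then defer flow matching to tcp_gro_receive().
 */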
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

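/* GRO complete handler for TCPv4.  Fraglist packets only need gso_type,
 * gso_segs and the checksum level fixed up; coalesced packets get a new
 * pseudo header checksum seeded before tcp_gro_complete() turns them
 * into valid GSO packets, with the fixed IP ID property observed during
 * GRO translated into SKB_GSO_TCP_FIXEDID.
 */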
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID << 1 != SKB_GSO_TCP_FIXEDID_INNER);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

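/* Register the TCPv4 GSO/GRO callbacks in the inet offload table. */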
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}