xref: /linux/net/ipv6/ip6_offload.c (revision 63307d015b91e626c97bb82e88054af3d0b74643)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>

#include "ip6_offload.h"

/* All GRO functions are always built in, except UDP over IPv6, which lives
 * in the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})
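
/*
 * Illustrative expansion (a sketch, not part of the upstream file): with
 * IPv6 built in, a call such as
 *
 *	indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
 *				     ops->callbacks.gro_receive, head, skb)
 *
 * roughly becomes
 *
 *	cb == tcp6_gro_receive ? tcp6_gro_receive(head, skb) :
 *	cb == udp6_gro_receive ? udp6_gro_receive(head, skb) :
 *				 cb(head, skb)
 *
 * (plus the recursion-limit check), so the common L4 handlers are reached
 * through direct calls instead of a retpoline-protected indirect call.
 * When IPv6 is modular, only the TCP handler gets the direct-call path,
 * per INDIRECT_CALL_L4 above.
 */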

static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
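
/*
 * Example walk (illustrative only): for a packet laid out as
 * IPv6 | Hop-by-Hop (8 bytes) | Routing (16 bytes) | TCP | payload,
 * ipv6_gso_pull_exthdrs() is entered just past the fixed IPv6 header,
 * pulls the 8 + 16 bytes of extension headers and returns IPPROTO_TCP,
 * leaving skb->data at the TCP header so the caller can reset the
 * transport header there.
 */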

static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
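
/*
 * Rough worked example of the udpfrag fix-up above (assuming three
 * resulting fragments, each carrying 1432 payload bytes after the
 * 8-byte fragment header, i.e. payload_len == 1440): the first segment
 * gets frag_off = 0 with IP6_MF set, the second 1432 with IP6_MF set,
 * and the last 2864 with IP6_MF clear, since offset advances by
 * payload_len - sizeof(struct frag_hdr) per segment.
 */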

/* Return the total length of all the extension hdrs, following the same
 * logic as ipv6_gso_pull_exthdrs() uses when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}
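
/*
 * Rough worked example: for an aggregated packet whose header chain is
 * IPv6 | Destination Options (16 bytes) | TCP, ipv6_exthdrs_len() steps
 * past the fixed IPv6 header, then past the 16-byte options header,
 * returns len == 16 and leaves *opps pointing at the TCP offload entry,
 * which ipv6_gro_complete() below adds to nhoff.
 */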

INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
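		/*
		 * Mask arithmetic, spelled out (informational): given the
		 * <Version:4><Traffic_Class:8><Flow_Label:20> layout noted
		 * above, first_word & htonl(0xF00FFFFF) is non-zero when the
		 * Version or Flow Label differ (which breaks the flow), while
		 * first_word & htonl(0x0FF00000) isolates a Traffic Class
		 * difference, which only forces a flush further below.
		 */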
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6 (IP4IP6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}
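
/*
 * Both receive wrappers above allow at most one level of tunnel GRO:
 * encap_mark is set on the first encapsulated header seen, and a second
 * tunnel header (e.g. IPv6 in IPv6 in IPv6) only sets flush, so the
 * packet is handed up unaggregated rather than parsed any deeper.
 */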

INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
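
/*
 * Offset bookkeeping example (illustrative): for a plain IPv6 + TCP
 * aggregate, ipv6_gro_complete() typically runs with nhoff == 0 (the
 * link-layer header has already been pulled); payload_len is rewritten
 * to cover the merged payload, and nhoff is then advanced by 40 bytes
 * (no extension headers here) so tcp6_gro_complete() receives the
 * offset of the TCP header.
 */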

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment	= sit_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment	= ip4ip6_gso_segment,
		.gro_receive    = ip4ip6_gro_receive,
		.gro_complete   = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment	= ip6ip6_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = ip6ip6_gro_complete,
	},
};
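
/*
 * Registration summary (informational): sit_offload handles IPv6 inside
 * IPv4 and therefore goes into the IPv4 offload table under IPPROTO_IPV6,
 * while ip4ip6_offload (IPv4 in IPv6, IPPROTO_IPIP) and ip6ip6_offload
 * (IPv6 in IPv6, IPPROTO_IPV6) are registered in the IPv6 offload table
 * by ipv6_offload_init() below.
 */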

static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);