xref: /linux/net/ipv6/ip6_offload.c (revision ca853314e78b0a65c20b6a889a23c31f918d4aa2)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

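/* Dispatch a GRO receive callback through INDIRECT_CALL_L4(), but first check
 * the GRO recursion limit: if too many encapsulation levels have already been
 * traversed, mark the skb for flushing and bail out instead of recursing.
 */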
#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})

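/* Walk the chain of IPv6 extension headers starting at skb->data, pulling
 * each one off the head of the skb, and return the protocol number of the
 * first header that is not a GSO-capable extension header (normally the
 * transport protocol).
 */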
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

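/* GSO segmentation for IPv6: pull the IPv6 header and any extension headers,
 * hand the packet to the upper-layer gso_segment callback, then fix up the
 * payload length (and the fragment header offsets for UDP fragmentation) in
 * every resulting segment.
 */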
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic as in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

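/* GRO receive for IPv6: validate and pull the IPv6 header, compare the flow
 * (addresses, flow label, next header and any extension headers) against the
 * packets already held on the GRO list, then hand off to the upper-layer
 * gro_receive callback.
 */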
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

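/* GRO receive for tunneled packets (SIT, IP4IP6, IP6IP6). Only one level of
 * IP-in-IP encapsulation is aggregated: if an encapsulated header has already
 * been seen, the packet is flushed instead of being parsed again.
 */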
static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6 (IP4IP6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

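/* GRO complete for IPv6: rewrite the payload length of the merged packet,
 * skip past the IPv6 header and any extension headers, and let the
 * upper-layer gro_complete callback finish the job.
 */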
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

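/* GRO complete for tunneled packets: mark the skb as encapsulated, record the
 * tunnel GSO type so later segmentation knows about the outer header, and
 * finish with the inner protocol's gro_complete.
 */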
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

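/* Offload callbacks registered for the ETH_P_IPV6 packet type via
 * dev_add_offload() in ipv6_offload_init() below.
 */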
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

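/* GSO segmentation helpers for tunneled packets: check that the expected
 * tunnel GSO type is set, then segment with the inner protocol's handler.
 */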
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

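/* Inner-protocol offload handlers for SIT (IPv6 over IPv4), IPv4-in-IPv6 and
 * IPv6-in-IPv6 tunnels, registered against the IP protocol numbers in
 * ipv6_offload_init() below.
 */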
static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment	= sit_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment	= ip4ip6_gso_segment,
		.gro_receive    = ip4ip6_gro_receive,
		.gro_complete   = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment	= ip6ip6_gso_segment,
		.gro_receive    = sit_ip6ip6_gro_receive,
		.gro_complete   = ip6ip6_gro_complete,
	},
};

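/* Register the IPv6 GSO/GRO handlers at boot: TCP and extension-header
 * offloads, the ETH_P_IPV6 packet offload, and the tunnel offloads above.
 */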
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);