// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

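/* Find the offset of the nexthdr byte that points at ESP.  For plain
 * ESP or ESP-in-UDP this is the nexthdr field of the base IPv6 header;
 * otherwise walk the extension headers within the first nhlen bytes.
 * Returns 0 if no header pointing at ESP is found.
 */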
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	/* ESP or ESPINUDP */
	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP ||
		   ipv6_hdr->nexthdr == NEXTHDR_UDP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}

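/* GRO receive handler for ESP and ESP-in-UDP: parse the SPI, look up
 * the xfrm state, attach it to the sec_path and hand the packet to
 * xfrm_input() for (possibly asynchronous) decryption.  Returning
 * ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb was consumed.
 */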
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;
	int nhoff;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

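/* Prepare the outer ESP header for GSO: fill in the SPI and the low
 * 32 bits of the sequence number, and remember the inner protocol in
 * the offload context for the segmentation callbacks.
 */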
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));

	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
		__be16 frag;

		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
	}

	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

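/* Tunnel mode: the inner packet is a complete IPv4 or IPv6 frame, so
 * segment it as such according to the inner family.
 */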
static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
						      : htons(ETH_P_IPV6);

	return skb_eth_gso_segment(skb, features, type);
}

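/* Transport mode: skip over the ESP header and defer to the inner
 * transport protocol's own gso_segment callback.
 */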
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

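/* BEET mode: recompute the transport header offset, which depends on
 * whether the inner family is IPv4 (possibly carrying a BEET pseudo
 * header) or IPv6 (possibly carrying extension headers), then defer to
 * the inner protocol's gso_segment callback.
 */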
static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

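/* Dispatch segmentation according to the encapsulation mode of the
 * outer xfrm state.
 */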
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm6_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

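/* GSO callback for ESP: validate the SPI against the offloading state,
 * pull the ESP header and IV, mask out checksum/scatter-gather features
 * the device cannot apply to encrypted payload, and segment by outer
 * mode.
 */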
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}

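/* Input tail used on the GRO path: fix up the checksum state and finish
 * ESP processing via esp6_input_done2().
 */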
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

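/* Transmit hook for ESP offload.  With full hardware offload the skb
 * leaves here with only the ESP header filled in; otherwise encryption
 * is done in software as a fallback (CRYPTO_FALLBACK).
 */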
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

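	/* Carry into the upper half of the 64-bit ESN if the low
	 * 32 bits wrapped.
	 */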
	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

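	/* A payload length above IPV6_MAXPLEN does not fit in
	 * payload_len; 0 is the jumbogram convention (RFC 2675).
	 */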
	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

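/* Register the ESP type offload with the xfrm layer and the GRO/GSO
 * callbacks with the IPv6 protocol offload table.
 */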
static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");