// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

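/* Locate the nexthdr byte that introduces the ESP (or ESP-in-UDP) header:
 * either the nexthdr field of the IPv6 header itself or that of the last
 * extension header before ESP.  Returns 0 if no ESP header is found within
 * the first nhlen bytes.
 */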
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	/* ESP or ESPINUDP */
	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP ||
		   ipv6_hdr->nexthdr == NEXTHDR_UDP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}

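/* GRO receive handler for ESP.  If the packet has not already been
 * decrypted by the device, look up the receive state for the SPI and
 * attach it to the skb's sec_path, then hand the packet to xfrm_input().
 * On success the skb is consumed and -EINPROGRESS is returned, so ESP
 * packets are never merged by GRO.
 */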
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;
	int nhoff;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET6);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

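/* Build the ESP header for a GSO packet: write the SPI and the low 32 bits
 * of the output sequence number, and record the inner protocol for the
 * segmentation callbacks.
 */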
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));

	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
		__be16 frag;

		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
	}

	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

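/* Tunnel mode: segment the inner packet as IPv4 or IPv6 according to the
 * inner mode's address family.
 */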
static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
					     XFRM_MODE_SKB_CB(skb)->protocol);
	__be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
						    : htons(ETH_P_IPV6);

	return skb_eth_gso_segment(skb, features, type);
}

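/* Transport mode: move the transport header past the ESP header and let
 * the inner transport protocol's offload handler do the segmentation.
 */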
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

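/* BEET mode: account for the BEET pseudo header and the IPv4/IPv6 header
 * size difference when locating the transport header, set the matching
 * gso_type and segment with the inner protocol's offload handler.
 */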
static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

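/* Dispatch GSO segmentation according to the encapsulation mode of the
 * outer state.
 */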
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm6_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

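/* GSO segmentation callback for ESP.  Validates the offload state and the
 * SPI, strips the ESP header and IV, masks out features the device cannot
 * provide for this packet and hands the skb to the mode specific
 * segmentation helper.
 */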
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}

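/* Finish receive processing for a GRO/offloaded ESP packet: set ip_summed
 * to CHECKSUM_NONE when crypto was not done by the device and complete via
 * esp6_input_done2().
 */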
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

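/* Transmit callback for ESP offload.  Computes the ESP trailer layout and
 * sequence numbers and fixes up the IPv6 payload length; when the device
 * cannot offload this packet, falls back to software ESP processing via
 * esp6_output_head()/esp6_output_tail().
 */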
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");