// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

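/* GRO receive handler for ESP. Parses the SPI and sequence number, looks up
 * the inbound xfrm state when the device has not already done the crypto,
 * and hands the packet to xfrm_input() in GRO context. Returns
 * ERR_PTR(-EINPROGRESS) once xfrm_input() has taken over the skb, or NULL
 * with the flush flag set so the packet continues on the normal path.
 */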
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        int encap_type = 0;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
                                            (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                            spi, IPPROTO_ESP, AF_INET);

                if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
                        /* non-offload path will record the error and audit log */
                        xfrm_state_put(x);
                        x = NULL;
                }

                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
                encap_type = UDP_ENCAP_ESPINUDP;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

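/* Build the outer ESP header for a GSO packet on output: mark the outer
 * protocol as ESP, fill in the SPI and the low-order sequence number, and
 * stash the inner protocol in the offload context for the segmentation and
 * transmit handlers.
 */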
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

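/* Segment a tunnel-mode ESP GSO packet by handing the inner packet to the
 * generic ethernet GSO code with the ethertype matching the inner family.
 */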
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
                                        XFRM_MODE_SKB_CB(skb)->protocol);
        __be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
                                                     : htons(ETH_P_IP);

        return skb_eth_gso_segment(skb, features, type);
}

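/* Segment a transport-mode ESP GSO packet: step the transport header past
 * the ESP header bytes and delegate to the inner protocol's gso_segment
 * callback.
 */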
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

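/* Segment a BEET-mode ESP GSO packet. The transport header is moved past
 * the ESP header and any BEET pseudo header, the GSO type is adjusted for
 * the inner protocol, and segmentation is delegated to the inner protocol's
 * gso_segment callback.
 */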
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        if (proto == IPPROTO_IPV6)
                skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

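/* Dispatch GSO segmentation according to the outer xfrm encapsulation mode
 * (tunnel, transport or BEET).
 */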
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

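/* GSO segmentation entry point for ESP. Validates the ESP header against
 * the xfrm state, strips the ESP header and IV, masks off features the
 * device cannot provide for this packet, and calls the per-mode
 * segmentation helper.
 */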
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

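/* Finish ESP input after GRO or hardware offload: make sure the ESP header
 * and IV are present, drop any stale checksum state when the crypto was not
 * done by the device, and complete receive processing via esp_input_done2().
 */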
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

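/* Transmit-side offload handler for ESP. Computes the ESP trailer layout,
 * fills in the SPI and sequence numbers (advancing the offload sequence
 * counter for GSO packets), and either leaves encryption to the device when
 * hardware offload applies or falls back to software encryption via
 * esp_output_tail().
 */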
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;
        int encap_type = 0;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (x->encap)
                encap_type = x->encap->encap_type;

        if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        if (xo->seq.low < seq)
                xo->seq.hi++;

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
                /* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
                 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
                 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
                 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
                 * Therefore, the protocol field needs to be corrected.
                 */
                ip_hdr(skb)->protocol = IPPROTO_UDP;

                esph->seq_no = htonl(seq);
        }

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        if (skb_needs_linearize(skb, skb->dev->features) &&
            __skb_linearize(skb))
                return -ENOMEM;
        return 0;
}

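/* Offload callbacks registered below: GRO receive and GSO segmentation with
 * the inet offload table, and the ESP type offload with the xfrm layer.
 */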
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

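/* Register the ESP type offload with xfrm and the GRO/GSO callbacks with
 * the inet offload layer.
 */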
static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

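/* Undo the registrations made in esp4_offload_init(). */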
static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");