// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI	: IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>
#include <linux/skbuff_ref.h>

#include <linux/highmem.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

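/* Scratch state for the ESN output path: seqhi holds the 32 bits that
 * are displaced when the ESP header is shifted to make room for the
 * high sequence-number bits, and esphoff records the original header
 * offset so esp_output_restore_header() can undo the shift after
 * encryption.
 */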
struct esp_output_extra {
        __be32 seqhi;
        u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number
 * are placed at the front, if present, followed by the IV, the request
 * and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
        unsigned int len;

        len = seqhilen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
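
/*
 * The esp_tmp_*() helpers below recompute offsets into the buffer
 * allocated by esp_alloc_tmp() rather than storing them.  The rough
 * layout is:
 *
 *	[seqhi][pad][IV][pad][aead_request + ctx][pad][SG entries]
 */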

static inline void *esp_tmp_extra(void *tmp)
{
        return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

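/* Locate the AEAD request inside the tmp buffer and, if encryption was
 * done out of place, drop the page references still held by the source
 * scatterlist.
 */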
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        int extralen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                extralen += sizeof(struct esp_output_extra);

        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        skb_page_unref(page_to_netmem(sg_page(sg)),
                                       skb->pp_recycle);
}

#ifdef CONFIG_INET6_ESPINTCP
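/* Look up the TCP socket carrying this ESP-in-TCP state.  On success
 * the socket is returned with a reference held (the caller must
 * sock_put() it); otherwise an ERR_PTR is returned.
 */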
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct net *net = xs_net(x);
        __be16 sport, dport;
        struct sock *sk;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        spin_unlock_bh(&x->lock);

        sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
                                        dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
        if (!sk)
                return ERR_PTR(-ENOENT);

        if (!tcp_is_ulp_esp(sk)) {
                sock_put(sk);
                return ERR_PTR(-EINVAL);
        }

        return sk;
}

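/* Push an already-encrypted skb to the ESP-in-TCP socket.  If the
 * socket is owned by user context, the skb is queued and transmitted
 * later when the socket is released.
 */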
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
        struct sock *sk;
        int err;

        rcu_read_lock();

        sk = esp6_find_tcp_sk(x);
        err = PTR_ERR_OR_ZERO(sk);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                err = espintcp_queue_out(sk, skb);
        else
                err = espintcp_push_skb(sk, skb);
        bh_unlock_sock(sk);

        sock_put(sk);

out:
        rcu_read_unlock();
        return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
                                   struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;

        return esp_output_tcp_finish(x, skb);
}

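/* Hand the skb off to the xfrm transform queue; the actual TCP push
 * then happens from the queue callback (esp_output_tcp_encap_cb())
 * rather than directly in this call chain.
 */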
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;

        local_bh_disable();
        err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
        local_bh_enable();

        /* EINPROGRESS just happens to do the right thing.  It
         * actually means that the skb has been consumed and
         * isn't coming back.
         */
        return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
        WARN_ON(1);
        return -EOPNOTSUPP;
}
#endif

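/* IPv6 forbids a zero UDP checksum, so for ESP-in-UDP the checksum is
 * computed over the final encrypted packet; a result of zero is folded
 * to CSUM_MANGLED_0 as usual.
 */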
static void esp_output_encap_csum(struct sk_buff *skb)
{
        /* UDP encap with IPv6 requires a valid checksum */
        if (*skb_mac_header(skb) == IPPROTO_UDP) {
                struct udphdr *uh = udp_hdr(skb);
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int len = ntohs(uh->len);
                unsigned int offset = skb_transport_offset(skb);
                __wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

                uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                            len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}

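/* AEAD encryption completion callback: release the scratch buffer,
 * finalize the encapsulation checksum, then resume either the device
 * offload path (XFRM_DEV_RESUME) or the regular xfrm output path.
 */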
static void esp_output_done(void *data, int err)
{
        struct sk_buff *skb = data;
        struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
        struct xfrm_state *x;

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                struct sec_path *sp = skb_sec_path(skb);

                x = sp->xvec[sp->len - 1];
        } else {
                x = skb_dst(skb)->xfrm;
        }

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp, skb);
        kfree(tmp);

        esp_output_encap_csum(skb);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));
                secpath_reset(skb);
                xfrm_dev_resume(skb);
        } else {
                if (!err &&
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
                        xfrm_output_resume(skb_to_full_sk(skb), skb, err);
        }
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_extra(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        void *tmp = ESP_SKB_CB(skb)->tmp;
        struct esp_output_extra *extra = esp_tmp_extra(tmp);

        esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
                                sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
                                             struct xfrm_state *x,
                                             struct ip_esp_hdr *esph,
                                             struct esp_output_extra *extra)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                __u32 seqhi;
                struct xfrm_offload *xo = xfrm_offload(skb);

                if (xo)
                        seqhi = xo->seq.hi;
                else
                        seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

                extra->esphoff = (unsigned char *)esph -
                                 skb_transport_header(skb);
                esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
                extra->seqhi = esph->spi;
                esph->seq_no = htonl(seqhi);
        }

        esph->spi = x->id.spi;

        return esph;
}

static void esp_output_done_esn(void *data, int err)
{
        struct sk_buff *skb = data;

        esp_output_restore_header(skb);
        esp_output_done(data, err);
}

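/* Build the UDP header for ESP-in-UDP encapsulation.  The checksum is
 * left at zero here and filled in by esp_output_encap_csum() once the
 * payload has been encrypted.
 */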
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
                                                int encap_type,
                                                struct esp_info *esp,
                                                __be16 sport,
                                                __be16 dport)
{
        struct udphdr *uh;
        unsigned int len;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > U16_MAX)
                return ERR_PTR(-EMSGSIZE);

        uh = (struct udphdr *)esp->esph;
        uh->source = sport;
        uh->dest = dport;
        uh->len = htons(len);
        uh->check = 0;

        *skb_mac_header(skb) = IPPROTO_UDP;

        return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
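/* For ESP-in-TCP, only a 2-byte length field precedes the ESP header;
 * the TCP framing itself is handled by the espintcp socket, which is
 * looked up here to confirm it still exists before committing.
 */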
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        __be16 *lenp = (void *)esp->esph;
        struct ip_esp_hdr *esph;
        unsigned int len;
        struct sock *sk;

        len = skb->len + esp->tailen - skb_transport_offset(skb);
        if (len > IP_MAX_MTU)
                return ERR_PTR(-EMSGSIZE);

        rcu_read_lock();
        sk = esp6_find_tcp_sk(x);
        rcu_read_unlock();

        if (IS_ERR(sk))
                return ERR_CAST(sk);

        sock_put(sk);

        *lenp = htons(len);
        esph = (struct ip_esp_hdr *)(lenp + 1);

        return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                struct esp_info *esp)
{
        return ERR_PTR(-EOPNOTSUPP);
}
#endif

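/* Apply the configured NAT-T encapsulation (UDP or TCP) and point
 * esp->esph at the ESP header inside the encapsulation.
 */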
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
                             struct esp_info *esp)
{
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph;
        __be16 sport, dport;
        int encap_type;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
                esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
                break;
        case TCP_ENCAP_ESPINTCP:
                esph = esp6_output_tcp_encap(x, skb, esp);
                break;
        }

        if (IS_ERR(esph))
                return PTR_ERR(esph);

        esp->esph = esph;

        return 0;
}

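/* Make room for the ESP trailer (padding, pad length, next header and
 * ICV).  Three strategies, from cheapest to most expensive: write the
 * trailer into existing tailroom, append it as a new page fragment from
 * the per-state page_frag allocator, or fall back to skb_cow_data().
 * Returns the number of SG entries needed to map the skb, or a negative
 * errno.
 */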
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        int nfrags;
        int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);

                if (err < 0)
                        return err;
        }

        if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
            ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
                goto cow;

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        tail = page_address(page) + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        /* plus one SG entry for the linear head (skb->data) */
                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk && sk_fullsock(sk))
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
        esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

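/* Map the packet into source (and, if needed, destination)
 * scatterlists, set up the AEAD request and start encryption.
 * Returns 0 or a negative errno; -EINPROGRESS indicates asynchronous
 * completion through esp_output_done().
 */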
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int extralen;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct aead_request *req;
        struct crypto_aead *aead;
        struct scatterlist *sg, *dsg;
        struct esp_output_extra *extra;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        extralen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                extralen += sizeof(*extra);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
        if (!tmp)
                goto error;

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_esn(skb, x, esp->esph, extra);
        esp->esph = esph;

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -ENOSPC:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
                esp_output_encap_csum(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp, skb);

        if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                err = esp_output_tail_tcp(x, skb);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

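/* xfrm_type output hook: compute TFC padding, trailer and padding
 * lengths, reserve the trailer with esp6_output_head() and encrypt via
 * esp6_output_tail().
 */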
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        esp.nfrags = esp6_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = esp.esph;
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                                ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp6_output_tail(x, skb, &esp);
}

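/* Strip the ESP trailer after decryption: validate the pad length,
 * then trim the padding, pad-length/next-header bytes and ICV.
 * Returns the next header protocol or a negative errno.
 */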
static inline int esp_remove_trailer(struct sk_buff *skb)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct crypto_aead *aead = x->data;
        int alen, hlen, elen;
        int padlen, trimlen;
        __wsum csumdiff;
        u8 nexthdr[2];
        int ret;

        alen = crypto_aead_authsize(aead);
        hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        elen = skb->len - hlen;

        ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
        BUG_ON(ret);

        ret = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        trimlen = alen + padlen + 2;
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
        ret = pskb_trim(skb, skb->len - trimlen);
        if (unlikely(ret))
                return ret;

        ret = nexthdr[1];

out:
        return ret;
}

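/* Common post-decryption processing: strip the ESP trailer, track
 * NAT-T peer address/port changes, and position the transport header
 * for the next protocol handler.  @err carries the decryption result.
 */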
int esp6_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int hdr_len = skb_network_header_len(skb);

        if (!xo || !(xo->flags & CRYPTO_DONE))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        if (x->encap) {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                int offset = skb_network_offset(skb) + sizeof(*ip6h);
                struct xfrm_encap_tmpl *encap = x->encap;
                u8 nexthdr = ip6h->nexthdr;
                __be16 frag_off, source;
                struct udphdr *uh;
                struct tcphdr *th;

                offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
                if (offset == -1) {
                        err = -EINVAL;
                        goto out;
                }

                uh = (void *)(skb->data + offset);
                th = (void *)(skb->data + offset);
                hdr_len += offset;

                switch (x->encap->encap_type) {
                case TCP_ENCAP_ESPINTCP:
                        source = th->source;
                        break;
                case UDP_ENCAP_ESPINUDP:
                        source = uh->source;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        err = -EINVAL;
                        goto out;
                }

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
                    source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
                        km_new_mapping(x, &ipaddr, source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_postpull_rcsum(skb, skb_network_header(skb),
                           skb_network_header_len(skb));
        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL ||
            x->props.mode == XFRM_MODE_IPTFS)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(void *data, int err)
{
        struct sk_buff *skb = data;

        xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                struct ip_esp_hdr *esph = skb_push(skb, 4);

                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}

static void esp_input_done_esn(void *data, int err)
{
        struct sk_buff *skb = data;

        esp_input_restore_header(skb);
        esp_input_done(data, err);
}

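/* xfrm_type input hook: map the received packet into a scatterlist and
 * decrypt it in place.  Completion is handled either synchronously
 * below or asynchronously via esp_input_done()/esp_input_done_esn().
 */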
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        nfrags = skb_cow_data(skb, 0, &trailer);
        if (nfrags < 0) {
                ret = -EINVAL;
                goto out;
        }

skip_cow:
        ret = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(ret < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        ret = esp6_input_done2(skb, ret);

out:
        return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                    u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);

        return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
                NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                return -ENAMETOOLONG;
        }

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

        return 0;

error:
        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
        return err;
}

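/* Build an "authenc(auth, enc)" (or "authencesn(...)" for ESN) AEAD
 * from separate authentication and encryption algorithms.  The key is
 * passed as a single blob: an rtattr carrying the encryption key
 * length, followed by the raw authentication key and then the raw
 * encryption key.
 */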
static int esp_init_authenc(struct xfrm_state *x,
                            struct netlink_ext_ack *extack)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
                        NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                        goto error;
                }
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
                        NL_SET_ERR_MSG(extack, "Algorithm name is too long");
                        goto error;
                }
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead)) {
                NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                goto error;
        }

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                        goto free_key;
                }
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead) {
                err = esp_init_aead(x, extack);
        } else if (x->ealg) {
                err = esp_init_authenc(x, extack);
        } else {
                NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
                err = -EINVAL;
        }

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        default:
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        }

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
#ifdef CONFIG_INET6_ESPINTCP
                case TCP_ENCAP_ESPINTCP:
                        /* only the length field, TCP encap is done by
                         * the socket
                         */
                        x->props.header_len += 2;
                        break;
#endif
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp6_type = {
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .input          = esp6_input,
        .output         = esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
        .handler        = xfrm6_rcv,
        .input_handler  = xfrm_input,
        .cb_handler     = esp6_rcv_cb,
        .err_handler    = esp6_err,
        .priority       = 0,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);