1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C)2002 USAGI/WIDE Project
4 *
5 * Authors
6 *
7 * Mitsuru KANDA @USAGI : IPv6 Support
8 * Kazunori MIYAZAWA @USAGI :
9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10 *
11 * This file is derived from net/ipv4/esp.c
12 */
13
14 #define pr_fmt(fmt) "IPv6: " fmt
15
16 #include <crypto/aead.h>
17 #include <crypto/authenc.h>
18 #include <linux/err.h>
19 #include <linux/module.h>
20 #include <net/ip.h>
21 #include <net/xfrm.h>
22 #include <net/esp.h>
23 #include <linux/scatterlist.h>
24 #include <linux/kernel.h>
25 #include <linux/pfkeyv2.h>
26 #include <linux/random.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <net/ip6_checksum.h>
30 #include <net/ip6_route.h>
31 #include <net/icmp.h>
32 #include <net/ipv6.h>
33 #include <net/protocol.h>
34 #include <net/udp.h>
35 #include <linux/icmpv6.h>
36 #include <net/tcp.h>
37 #include <net/espintcp.h>
38 #include <net/inet6_hashtables.h>
39 #include <linux/skbuff_ref.h>
40
41 #include <linux/highmem.h>
42
/* Per-skb ESP scratch state, stored in skb->cb while an async crypto
 * operation is in flight.  "tmp" points at the kmalloc'd buffer holding
 * the ESN bits / IV / AEAD request / scatterlist (see esp_alloc_tmp()).
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

/* Extra output state needed for ESN: the saved 32 high sequence bits and
 * the ESP header's offset from the transport header (used to restore the
 * header after encryption, see esp_output_restore_header()).
 */
struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

/* Accessor for the ESP scratch state embedded in skb->cb. */
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
54
55 /*
56 * Allocate an AEAD request structure with extra space for SG and IV.
57 *
58 * For alignment considerations the upper 32 bits of the sequence number are
59 * placed at the front, if present. Followed by the IV, the request and finally
60 * the SG list.
61 *
62 * TODO: Use spare space in skb for this where possible.
63 */
esp_alloc_tmp(struct crypto_aead * aead,int nfrags,int seqihlen)64 static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
65 {
66 unsigned int len;
67
68 len = seqihlen;
69
70 len += crypto_aead_ivsize(aead);
71
72 if (len) {
73 len += crypto_aead_alignmask(aead) &
74 ~(crypto_tfm_ctx_alignment() - 1);
75 len = ALIGN(len, crypto_tfm_ctx_alignment());
76 }
77
78 len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
79 len = ALIGN(len, __alignof__(struct scatterlist));
80
81 len += sizeof(struct scatterlist) * nfrags;
82
83 return kmalloc(len, GFP_ATOMIC);
84 }
85
/* The ESN extra area lives at the (suitably aligned) start of the
 * scratch buffer returned by esp_alloc_tmp().
 */
static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}
90
esp_tmp_iv(struct crypto_aead * aead,void * tmp,int seqhilen)91 static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
92 {
93 return crypto_aead_ivsize(aead) ?
94 PTR_ALIGN((u8 *)tmp + seqhilen,
95 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
96 }
97
/* Locate the AEAD request inside the scratch buffer (after the IV,
 * ctx-aligned) and bind it to the transform.
 */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req =
		(void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				  crypto_tfm_ctx_alignment());

	aead_request_set_tfm(req, aead);

	return req;
}
107
/* Locate the scatterlist array: it follows the request and its private
 * context, aligned for struct scatterlist.
 */
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
115
/* Drop the page references taken for a separate (non-inplace) destination
 * scatterlist.  Recomputes the IV/request positions inside the scratch
 * buffer to recover req->src and req->dst.
 */
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(page_to_netmem(sg_page(sg)),
				       skb->pp_recycle);
}
138
139 #ifdef CONFIG_INET6_ESPINTCP
/* Look up the established TCP socket used for ESP-in-TCP encapsulation.
 * Briefly takes x->lock to snapshot the encap ports, then searches the
 * established hash.  Returns the socket with a reference held, or an
 * ERR_PTR (-ENOENT if no socket, -EINVAL if it isn't running espintcp).
 */
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	__be16 sport, dport;
	struct sock *sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(net, &x->id.daddr.in6, dport,
					&x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	/* The socket must carry the espintcp ULP, otherwise it's not ours. */
	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	return sk;
}
164
/* Hand an ESP-in-TCP skb to the encap socket: queue it to the backlog if
 * the socket is owned by user context, otherwise push it out directly.
 * The skb is freed here if the socket lookup fails.
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

out:
	rcu_read_unlock();
	return err;
}
192
esp_output_tcp_encap_cb(struct net * net,struct sock * sk,struct sk_buff * skb)193 static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
194 struct sk_buff *skb)
195 {
196 struct dst_entry *dst = skb_dst(skb);
197 struct xfrm_state *x = dst->xfrm;
198
199 return esp_output_tcp_finish(x, skb);
200 }
201
/* Defer ESP-in-TCP transmission through the xfrm trans queue so the
 * socket is handled in a safe (softirq) context.
 */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
216 #else
/* !CONFIG_INET6_ESPINTCP stub.  Unreachable in practice: without the
 * config, esp6_init_state() rejects TCP_ENCAP_ESPINTCP states.  The skb
 * is deliberately not freed here — callers free it when the return value
 * is not -EINPROGRESS (see esp_output_done()).
 */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	WARN_ON(1);
	return -EOPNOTSUPP;
}
222 #endif
223
/* Compute the UDP checksum over the encrypted payload for UDP-encapsulated
 * ESP.  Must run after encryption; IPv6 forbids a zero UDP checksum, hence
 * the CSUM_MANGLED_0 substitution.
 */
static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
240
/* Completion callback for (possibly async) AEAD encryption on output.
 * Releases the scratch buffer, fixes up the encap checksum and resumes
 * the appropriate output path (device offload resume, ESP-in-TCP, or
 * plain xfrm output).
 */
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	/* On the device-resume path the state comes from the sec_path,
	 * not from the dst.
	 */
	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) {
			/* -EINPROGRESS means the skb was consumed. */
			err = esp_output_tail_tcp(x, skb);
			if (err != -EINPROGRESS)
				kfree_skb(skb);
		} else {
			xfrm_output_resume(skb_to_full_sk(skb), skb, err);
		}
	}
}
283
/* Move ESP header back into place.
 * Undoes the 4-byte ESN shift: the SPI (saved at encryption time in the
 * scratch buffer) goes back into esph->spi and the value that temporarily
 * occupied the spi slot returns to seq_no.
 */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
294
/* Restore the ESP header after ESN output processing, using the header
 * offset recorded by esp_output_set_esn() (minus the 4-byte shift).
 */
static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
303
/* Prepare the ESP header for output; for ESN states, shift it so the
 * high sequence bits are covered by the ICV.  Returns the (possibly
 * moved) header pointer.
 */
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* Offloaded packets carry their sequence number in xo. */
		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		/* Remember where the header was so it can be restored. */
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
333
/* Output completion callback for ESN states: put the ESP header back in
 * place before running the common completion handler.
 */
static void esp_output_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_output_restore_header(skb);
	esp_output_done(data, err);
}
341
/* Build the UDP encapsulation header in front of the ESP header.
 * The checksum is left at zero here and filled in after encryption by
 * esp_output_encap_csum().  Fails with -EMSGSIZE if the datagram would
 * overflow the 16-bit UDP length field.
 */
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	/* Tell the post-encryption path this packet is UDP-encapsulated. */
	*skb_mac_header(skb) = IPPROTO_UDP;

	return (struct ip_esp_hdr *)(uh + 1);
}
365
366 #ifdef CONFIG_INET6_ESPINTCP
/* Build the ESP-in-TCP framing: a 2-byte length prefix in front of the
 * ESP header (the TCP header itself is produced by the encap socket).
 * Verifies the encap socket still exists before committing.
 */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	/* Only checking the socket exists; the reference is dropped right
	 * away — transmission re-looks it up in esp_output_tcp_finish().
	 */
	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	sock_put(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
394 #else
/* !CONFIG_INET6_ESPINTCP stub: ESP-in-TCP output is unsupported. */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
401 #endif
402
/* Emit the encapsulation header (UDP or TCP) for an encapsulated state
 * and advance esp->esph past it.  Ports/type are snapshotted under
 * x->lock since userspace may update the template concurrently.
 */
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
434
/* First half of ESP output: emit the encap header (if any) and append the
 * ESP trailer (TFC pad, padding, pad length, next header).  Tries to
 * avoid copying: use existing tailroom, or attach the trailer as a new
 * page fragment from the per-state page_frag; otherwise fall back to
 * skb_cow_data().  Returns the number of SG entries needed, or < 0.
 */
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	/* Oversized trailer or frag data won't fit a page frag: copy. */
	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Fast path: the trailer fits in existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			/* Trailer goes into a separate page fragment, so
			 * encryption needs a distinct dst scatterlist.
			 */
			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the shared page_frag. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* One extra SG entry for skb->data itself. */
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* Slow path: linearize enough of the skb for the trailer.  The
	 * ESP header pointer must be re-derived since cow may move data.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
525
/* Second half of ESP output: build the scatterlists and run the AEAD
 * encryption.  Handles both in-place and split src/dst operation, ESN
 * header shuffling, and hands off to TCP encap when configured.
 * Returns 0, a negative error, or -EINPROGRESS for async crypto.
 */
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* AAD also covers the high sequence bits for ESN. */
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2: room for a possible separate dst scatterlist. */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Seed the IV from the 64-bit sequence number (right-aligned,
	 * truncated to the IV size when the IV is shorter than 8 bytes).
	 */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: esp_output_done() will finish and free tmp. */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
648
/* xfrm_type output handler: compute trailer sizes (incl. optional TFC
 * padding), build the trailer and ESP header, then encrypt.
 */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	/* Save the inner protocol; the mac header byte doubles as the
	 * "next header" carrier for ESP processing.
	 */
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		/* TFC-pad up to the configured size, capped at the PMTU. */
		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	/* +2 for the pad-length and next-header trailer bytes. */
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
698
/* Strip the ESP trailer (padding, pad length, next header, ICV) from a
 * decrypted skb.  Returns the inner "next header" protocol on success or
 * a negative error for garbage padding.
 */
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	/* The last two bytes before the ICV are pad-length and next-header. */
	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Keep the hardware checksum consistent with the trim. */
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;

	ret = nexthdr[1];

out:
	return ret;
}
739
/* Finish ESP input after decryption: remove the trailer, handle NAT-T
 * encapsulation bookkeeping (port-change notifications, checksum rules),
 * and pull the ESP header so the inner packet is exposed.  Returns the
 * inner protocol number or a negative error.
 */
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	/* Offloaded (CRYPTO_DONE) packets never allocated a tmp buffer. */
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		/* Skip any extension headers to reach the encap header. */
		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL ||
	    x->props.mode == XFRM_MODE_IPTFS)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
840
/* Async decryption completion callback: finish input processing and
 * resume the xfrm input path.
 */
static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}
847
/* Undo the 4-byte ESN shift applied by esp_input_set_header() and drop
 * the extra bytes that were pushed.
 */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
853
/* For ESN states, shift the ESP header so the high sequence bits are
 * included in the authenticated data; the displaced SPI is saved in
 * *seqhi for esp_input_restore_header().
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
870
/* Input completion callback for ESN states: restore the ESP header
 * before running the common input completion handler.
 */
static void esp_input_done_esn(void *data, int err)
{
	struct sk_buff *skb = data;

	esp_input_restore_header(skb);
	esp_input_done(data, err);
}
878
/* xfrm_type input handler: validate sizes, build a scatterlist over the
 * whole packet and run AEAD decryption (in place).  Returns the inner
 * protocol, a negative error, or -EINPROGRESS for async crypto.
 */
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	/* The ESP header and IV must be directly accessible. */
	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	/* Nothing left to decrypt: malformed packet. */
	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid skb_cow_data() when the layout already suits in-place
	 * decryption (unshared, and no frag list).
	 */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	/* Decryption invalidates any previously computed checksum. */
	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
975
/* ICMPv6 error handler for ESP: propagate Packet-Too-Big (PMTU) and
 * redirect notifications to the routing layer for matching states.
 */
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	/* Only PMTU and redirect errors are actionable here. */
	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
1002
/* xfrm_type destructor: release the AEAD transform, if one was set up. */
static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	/* x->data stays NULL when state init failed before allocation. */
	if (!aead)
		return;

	crypto_free_aead(aead);
}
1012
/* Set up a true AEAD transform (e.g. GCM) for the state: build the
 * "geniv(alg)" name, allocate the tfm, and program key and ICV length.
 */
static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
		return -ENAMETOOLONG;
	}

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	/* Store before setkey so esp6_destroy() can free it on failure. */
	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

	return 0;

error:
	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
	return err;
}
1047
/* Set up a composed authenc/authencesn transform for separate cipher and
 * auth algorithms: build the template name, allocate the tfm, and pack
 * the authenc key blob (RTA param + auth key + enc key) for setkey.
 */
static int esp_init_authenc(struct xfrm_state *x,
			    struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -ENAMETOOLONG;

	/* ESN states need the authencesn template so the high sequence
	 * bits are authenticated.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
			goto error;
		}
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	x->data = aead;

	/* Key blob layout: RTA-wrapped enckeylen param, auth key, enc key. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* The template's full digest size must match the algorithm
		 * description before the ICV is truncated.
		 */
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err) {
			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
			goto free_key;
		}
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
1142
/* xfrm_type init: set up the crypto transform and compute per-state
 * header/trailer length overheads for mode and encapsulation.
 */
static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead) {
		err = esp_init_aead(x, extack);
	} else if (x->ealg) {
		err = esp_init_authenc(x, extack);
	} else {
		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
		err = -EINVAL;
	}

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		/* Extra room for the pseudo header when the inner family
		 * is IPv4 (header size difference + BEET phdr).
		 */
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	/* Worst-case trailer: padding to block size, +1 next header byte,
	 * plus the ICV.
	 */
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
1209
/* Receive callback for the xfrm6 protocol handler: nothing to do. */
static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
1214
/* ESP transform registration with the xfrm core. */
static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};
1224
/* IPv6 protocol handler registration for IPPROTO_ESP packets. */
static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};
1232
/* Module init: register the xfrm type and the IPv6 protocol handler,
 * unwinding the type registration if the second step fails.
 */
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
1247
/* Module exit: deregister the protocol handler and the xfrm type. */
static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}
1254
1255 module_init(esp6_init);
1256 module_exit(esp6_fini);
1257
1258 MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
1259 MODULE_LICENSE("GPL");
1260 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
1261