xref: /linux/net/ipv6/esp6.c (revision b7019ac550eb3916f34d79db583e9b7ea2524afa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C)2002 USAGI/WIDE Project
4  *
5  * Authors
6  *
7  *	Mitsuru KANDA @USAGI       : IPv6 Support
8  *	Kazunori MIYAZAWA @USAGI   :
9  *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10  *
11  *	This file is derived from net/ipv4/esp.c
12  */
13 
14 #define pr_fmt(fmt) "IPv6: " fmt
15 
16 #include <crypto/aead.h>
17 #include <crypto/authenc.h>
18 #include <linux/err.h>
19 #include <linux/module.h>
20 #include <net/ip.h>
21 #include <net/xfrm.h>
22 #include <net/esp.h>
23 #include <linux/scatterlist.h>
24 #include <linux/kernel.h>
25 #include <linux/pfkeyv2.h>
26 #include <linux/random.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <net/ip6_route.h>
30 #include <net/icmp.h>
31 #include <net/ipv6.h>
32 #include <net/protocol.h>
33 #include <linux/icmpv6.h>
34 
35 #include <linux/highmem.h>
36 
37 struct esp_skb_cb {
38 	struct xfrm_skb_cb xfrm;
39 	void *tmp;
40 };
41 
42 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
43 
44 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
45 
46 /*
47  * Allocate an AEAD request structure with extra space for SG and IV.
48  *
49  * For alignment considerations the upper 32 bits of the sequence number are
50  * placed at the front, if present. Followed by the IV, the request and finally
51  * the SG list.
52  *
53  * TODO: Use spare space in skb for this where possible.
54  */
55 static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
56 {
57 	unsigned int len;
58 
59 	len = seqihlen;
60 
61 	len += crypto_aead_ivsize(aead);
62 
63 	if (len) {
64 		len += crypto_aead_alignmask(aead) &
65 		       ~(crypto_tfm_ctx_alignment() - 1);
66 		len = ALIGN(len, crypto_tfm_ctx_alignment());
67 	}
68 
69 	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
70 	len = ALIGN(len, __alignof__(struct scatterlist));
71 
72 	len += sizeof(struct scatterlist) * nfrags;
73 
74 	return kmalloc(len, GFP_ATOMIC);
75 }
76 
77 static inline __be32 *esp_tmp_seqhi(void *tmp)
78 {
79 	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
80 }
81 
82 static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
83 {
84 	return crypto_aead_ivsize(aead) ?
85 	       PTR_ALIGN((u8 *)tmp + seqhilen,
86 			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
87 }
88 
89 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
90 {
91 	struct aead_request *req;
92 
93 	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
94 				crypto_tfm_ctx_alignment());
95 	aead_request_set_tfm(req, aead);
96 	return req;
97 }
98 
99 static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
100 					     struct aead_request *req)
101 {
102 	return (void *)ALIGN((unsigned long)(req + 1) +
103 			     crypto_aead_reqsize(aead),
104 			     __alignof__(struct scatterlist));
105 }
106 
107 static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
108 {
109 	struct crypto_aead *aead = x->data;
110 	int seqhilen = 0;
111 	u8 *iv;
112 	struct aead_request *req;
113 	struct scatterlist *sg;
114 
115 	if (x->props.flags & XFRM_STATE_ESN)
116 		seqhilen += sizeof(__be32);
117 
118 	iv = esp_tmp_iv(aead, tmp, seqhilen);
119 	req = esp_tmp_req(aead, iv);
120 
121 	/* Unref skb_frag_pages in the src scatterlist if necessary.
122 	 * Skip the first sg which comes from skb->data.
123 	 */
124 	if (req->src != req->dst)
125 		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
126 			put_page(sg_page(sg));
127 }
128 
129 static void esp_output_done(struct crypto_async_request *base, int err)
130 {
131 	struct sk_buff *skb = base->data;
132 	struct xfrm_offload *xo = xfrm_offload(skb);
133 	void *tmp;
134 	struct xfrm_state *x;
135 
136 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
137 		struct sec_path *sp = skb_sec_path(skb);
138 
139 		x = sp->xvec[sp->len - 1];
140 	} else {
141 		x = skb_dst(skb)->xfrm;
142 	}
143 
144 	tmp = ESP_SKB_CB(skb)->tmp;
145 	esp_ssg_unref(x, tmp);
146 	kfree(tmp);
147 
148 	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
149 		if (err) {
150 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
151 			kfree_skb(skb);
152 			return;
153 		}
154 
155 		skb_push(skb, skb->data - skb_mac_header(skb));
156 		secpath_reset(skb);
157 		xfrm_dev_resume(skb);
158 	} else {
159 		xfrm_output_resume(skb, err);
160 	}
161 }
162 
163 /* Move ESP header back into place. */
164 static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
165 {
166 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
167 	void *tmp = ESP_SKB_CB(skb)->tmp;
168 	__be32 *seqhi = esp_tmp_seqhi(tmp);
169 
170 	esph->seq_no = esph->spi;
171 	esph->spi = *seqhi;
172 }
173 
174 static void esp_output_restore_header(struct sk_buff *skb)
175 {
176 	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
177 }
178 
179 static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
180 					     struct xfrm_state *x,
181 					     struct ip_esp_hdr *esph,
182 					     __be32 *seqhi)
183 {
184 	/* For ESN we move the header forward by 4 bytes to
185 	 * accomodate the high bits.  We will move it back after
186 	 * encryption.
187 	 */
188 	if ((x->props.flags & XFRM_STATE_ESN)) {
189 		struct xfrm_offload *xo = xfrm_offload(skb);
190 
191 		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
192 		*seqhi = esph->spi;
193 		if (xo)
194 			esph->seq_no = htonl(xo->seq.hi);
195 		else
196 			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
197 	}
198 
199 	esph->spi = x->id.spi;
200 
201 	return esph;
202 }
203 
204 static void esp_output_done_esn(struct crypto_async_request *base, int err)
205 {
206 	struct sk_buff *skb = base->data;
207 
208 	esp_output_restore_header(skb);
209 	esp_output_done(base, err);
210 }
211 
212 static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
213 {
214 	/* Fill padding... */
215 	if (tfclen) {
216 		memset(tail, 0, tfclen);
217 		tail += tfclen;
218 	}
219 	do {
220 		int i;
221 		for (i = 0; i < plen - 2; i++)
222 			tail[i] = i + 1;
223 	} while (0);
224 	tail[plen - 2] = plen - 2;
225 	tail[plen - 1] = proto;
226 }
227 
228 int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
229 {
230 	u8 *tail;
231 	u8 *vaddr;
232 	int nfrags;
233 	struct page *page;
234 	struct sk_buff *trailer;
235 	int tailen = esp->tailen;
236 
237 	if (!skb_cloned(skb)) {
238 		if (tailen <= skb_tailroom(skb)) {
239 			nfrags = 1;
240 			trailer = skb;
241 			tail = skb_tail_pointer(trailer);
242 
243 			goto skip_cow;
244 		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
245 			   && !skb_has_frag_list(skb)) {
246 			int allocsize;
247 			struct sock *sk = skb->sk;
248 			struct page_frag *pfrag = &x->xfrag;
249 
250 			esp->inplace = false;
251 
252 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
253 
254 			spin_lock_bh(&x->lock);
255 
256 			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
257 				spin_unlock_bh(&x->lock);
258 				goto cow;
259 			}
260 
261 			page = pfrag->page;
262 			get_page(page);
263 
264 			vaddr = kmap_atomic(page);
265 
266 			tail = vaddr + pfrag->offset;
267 
268 			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
269 
270 			kunmap_atomic(vaddr);
271 
272 			nfrags = skb_shinfo(skb)->nr_frags;
273 
274 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
275 					     tailen);
276 			skb_shinfo(skb)->nr_frags = ++nfrags;
277 
278 			pfrag->offset = pfrag->offset + allocsize;
279 
280 			spin_unlock_bh(&x->lock);
281 
282 			nfrags++;
283 
284 			skb->len += tailen;
285 			skb->data_len += tailen;
286 			skb->truesize += tailen;
287 			if (sk && sk_fullsock(sk))
288 				refcount_add(tailen, &sk->sk_wmem_alloc);
289 
290 			goto out;
291 		}
292 	}
293 
294 cow:
295 	nfrags = skb_cow_data(skb, tailen, &trailer);
296 	if (nfrags < 0)
297 		goto out;
298 	tail = skb_tail_pointer(trailer);
299 
300 skip_cow:
301 	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
302 	pskb_put(skb, trailer, tailen);
303 
304 out:
305 	return nfrags;
306 }
307 EXPORT_SYMBOL_GPL(esp6_output_head);
308 
309 int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
310 {
311 	u8 *iv;
312 	int alen;
313 	void *tmp;
314 	int ivlen;
315 	int assoclen;
316 	int seqhilen;
317 	__be32 *seqhi;
318 	struct page *page;
319 	struct ip_esp_hdr *esph;
320 	struct aead_request *req;
321 	struct crypto_aead *aead;
322 	struct scatterlist *sg, *dsg;
323 	int err = -ENOMEM;
324 
325 	assoclen = sizeof(struct ip_esp_hdr);
326 	seqhilen = 0;
327 
328 	if (x->props.flags & XFRM_STATE_ESN) {
329 		seqhilen += sizeof(__be32);
330 		assoclen += sizeof(__be32);
331 	}
332 
333 	aead = x->data;
334 	alen = crypto_aead_authsize(aead);
335 	ivlen = crypto_aead_ivsize(aead);
336 
337 	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
338 	if (!tmp)
339 		goto error;
340 
341 	seqhi = esp_tmp_seqhi(tmp);
342 	iv = esp_tmp_iv(aead, tmp, seqhilen);
343 	req = esp_tmp_req(aead, iv);
344 	sg = esp_req_sg(aead, req);
345 
346 	if (esp->inplace)
347 		dsg = sg;
348 	else
349 		dsg = &sg[esp->nfrags];
350 
351 	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
352 
353 	sg_init_table(sg, esp->nfrags);
354 	err = skb_to_sgvec(skb, sg,
355 		           (unsigned char *)esph - skb->data,
356 		           assoclen + ivlen + esp->clen + alen);
357 	if (unlikely(err < 0))
358 		goto error_free;
359 
360 	if (!esp->inplace) {
361 		int allocsize;
362 		struct page_frag *pfrag = &x->xfrag;
363 
364 		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
365 
366 		spin_lock_bh(&x->lock);
367 		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
368 			spin_unlock_bh(&x->lock);
369 			goto error_free;
370 		}
371 
372 		skb_shinfo(skb)->nr_frags = 1;
373 
374 		page = pfrag->page;
375 		get_page(page);
376 		/* replace page frags in skb with new page */
377 		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
378 		pfrag->offset = pfrag->offset + allocsize;
379 		spin_unlock_bh(&x->lock);
380 
381 		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
382 		err = skb_to_sgvec(skb, dsg,
383 			           (unsigned char *)esph - skb->data,
384 			           assoclen + ivlen + esp->clen + alen);
385 		if (unlikely(err < 0))
386 			goto error_free;
387 	}
388 
389 	if ((x->props.flags & XFRM_STATE_ESN))
390 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
391 	else
392 		aead_request_set_callback(req, 0, esp_output_done, skb);
393 
394 	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
395 	aead_request_set_ad(req, assoclen);
396 
397 	memset(iv, 0, ivlen);
398 	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
399 	       min(ivlen, 8));
400 
401 	ESP_SKB_CB(skb)->tmp = tmp;
402 	err = crypto_aead_encrypt(req);
403 
404 	switch (err) {
405 	case -EINPROGRESS:
406 		goto error;
407 
408 	case -ENOSPC:
409 		err = NET_XMIT_DROP;
410 		break;
411 
412 	case 0:
413 		if ((x->props.flags & XFRM_STATE_ESN))
414 			esp_output_restore_header(skb);
415 	}
416 
417 	if (sg != dsg)
418 		esp_ssg_unref(x, tmp);
419 
420 error_free:
421 	kfree(tmp);
422 error:
423 	return err;
424 }
425 EXPORT_SYMBOL_GPL(esp6_output_tail);
426 
427 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
428 {
429 	int alen;
430 	int blksize;
431 	struct ip_esp_hdr *esph;
432 	struct crypto_aead *aead;
433 	struct esp_info esp;
434 
435 	esp.inplace = true;
436 
437 	esp.proto = *skb_mac_header(skb);
438 	*skb_mac_header(skb) = IPPROTO_ESP;
439 
440 	/* skb is pure payload to encrypt */
441 
442 	aead = x->data;
443 	alen = crypto_aead_authsize(aead);
444 
445 	esp.tfclen = 0;
446 	if (x->tfcpad) {
447 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
448 		u32 padto;
449 
450 		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
451 		if (skb->len < padto)
452 			esp.tfclen = padto - skb->len;
453 	}
454 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
455 	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
456 	esp.plen = esp.clen - skb->len - esp.tfclen;
457 	esp.tailen = esp.tfclen + esp.plen + alen;
458 
459 	esp.nfrags = esp6_output_head(x, skb, &esp);
460 	if (esp.nfrags < 0)
461 		return esp.nfrags;
462 
463 	esph = ip_esp_hdr(skb);
464 	esph->spi = x->id.spi;
465 
466 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
467 	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
468 			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
469 
470 	skb_push(skb, -skb_network_offset(skb));
471 
472 	return esp6_output_tail(x, skb, &esp);
473 }
474 
475 static inline int esp_remove_trailer(struct sk_buff *skb)
476 {
477 	struct xfrm_state *x = xfrm_input_state(skb);
478 	struct xfrm_offload *xo = xfrm_offload(skb);
479 	struct crypto_aead *aead = x->data;
480 	int alen, hlen, elen;
481 	int padlen, trimlen;
482 	__wsum csumdiff;
483 	u8 nexthdr[2];
484 	int ret;
485 
486 	alen = crypto_aead_authsize(aead);
487 	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
488 	elen = skb->len - hlen;
489 
490 	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
491 		ret = xo->proto;
492 		goto out;
493 	}
494 
495 	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
496 	BUG_ON(ret);
497 
498 	ret = -EINVAL;
499 	padlen = nexthdr[0];
500 	if (padlen + 2 + alen >= elen) {
501 		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
502 				    padlen + 2, elen - alen);
503 		goto out;
504 	}
505 
506 	trimlen = alen + padlen + 2;
507 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
508 		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
509 		skb->csum = csum_block_sub(skb->csum, csumdiff,
510 					   skb->len - trimlen);
511 	}
512 	pskb_trim(skb, skb->len - trimlen);
513 
514 	ret = nexthdr[1];
515 
516 out:
517 	return ret;
518 }
519 
520 int esp6_input_done2(struct sk_buff *skb, int err)
521 {
522 	struct xfrm_state *x = xfrm_input_state(skb);
523 	struct xfrm_offload *xo = xfrm_offload(skb);
524 	struct crypto_aead *aead = x->data;
525 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
526 	int hdr_len = skb_network_header_len(skb);
527 
528 	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
529 		kfree(ESP_SKB_CB(skb)->tmp);
530 
531 	if (unlikely(err))
532 		goto out;
533 
534 	err = esp_remove_trailer(skb);
535 	if (unlikely(err < 0))
536 		goto out;
537 
538 	skb_postpull_rcsum(skb, skb_network_header(skb),
539 			   skb_network_header_len(skb));
540 	skb_pull_rcsum(skb, hlen);
541 	if (x->props.mode == XFRM_MODE_TUNNEL)
542 		skb_reset_transport_header(skb);
543 	else
544 		skb_set_transport_header(skb, -hdr_len);
545 
546 	/* RFC4303: Drop dummy packets without any error */
547 	if (err == IPPROTO_NONE)
548 		err = -EINVAL;
549 
550 out:
551 	return err;
552 }
553 EXPORT_SYMBOL_GPL(esp6_input_done2);
554 
555 static void esp_input_done(struct crypto_async_request *base, int err)
556 {
557 	struct sk_buff *skb = base->data;
558 
559 	xfrm_input_resume(skb, esp6_input_done2(skb, err));
560 }
561 
562 static void esp_input_restore_header(struct sk_buff *skb)
563 {
564 	esp_restore_header(skb, 0);
565 	__skb_pull(skb, 4);
566 }
567 
568 static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
569 {
570 	struct xfrm_state *x = xfrm_input_state(skb);
571 
572 	/* For ESN we move the header forward by 4 bytes to
573 	 * accomodate the high bits.  We will move it back after
574 	 * decryption.
575 	 */
576 	if ((x->props.flags & XFRM_STATE_ESN)) {
577 		struct ip_esp_hdr *esph = skb_push(skb, 4);
578 
579 		*seqhi = esph->spi;
580 		esph->spi = esph->seq_no;
581 		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
582 	}
583 }
584 
585 static void esp_input_done_esn(struct crypto_async_request *base, int err)
586 {
587 	struct sk_buff *skb = base->data;
588 
589 	esp_input_restore_header(skb);
590 	esp_input_done(base, err);
591 }
592 
593 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
594 {
595 	struct crypto_aead *aead = x->data;
596 	struct aead_request *req;
597 	struct sk_buff *trailer;
598 	int ivlen = crypto_aead_ivsize(aead);
599 	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
600 	int nfrags;
601 	int assoclen;
602 	int seqhilen;
603 	int ret = 0;
604 	void *tmp;
605 	__be32 *seqhi;
606 	u8 *iv;
607 	struct scatterlist *sg;
608 
609 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
610 		ret = -EINVAL;
611 		goto out;
612 	}
613 
614 	if (elen <= 0) {
615 		ret = -EINVAL;
616 		goto out;
617 	}
618 
619 	assoclen = sizeof(struct ip_esp_hdr);
620 	seqhilen = 0;
621 
622 	if (x->props.flags & XFRM_STATE_ESN) {
623 		seqhilen += sizeof(__be32);
624 		assoclen += seqhilen;
625 	}
626 
627 	if (!skb_cloned(skb)) {
628 		if (!skb_is_nonlinear(skb)) {
629 			nfrags = 1;
630 
631 			goto skip_cow;
632 		} else if (!skb_has_frag_list(skb)) {
633 			nfrags = skb_shinfo(skb)->nr_frags;
634 			nfrags++;
635 
636 			goto skip_cow;
637 		}
638 	}
639 
640 	nfrags = skb_cow_data(skb, 0, &trailer);
641 	if (nfrags < 0) {
642 		ret = -EINVAL;
643 		goto out;
644 	}
645 
646 skip_cow:
647 	ret = -ENOMEM;
648 	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
649 	if (!tmp)
650 		goto out;
651 
652 	ESP_SKB_CB(skb)->tmp = tmp;
653 	seqhi = esp_tmp_seqhi(tmp);
654 	iv = esp_tmp_iv(aead, tmp, seqhilen);
655 	req = esp_tmp_req(aead, iv);
656 	sg = esp_req_sg(aead, req);
657 
658 	esp_input_set_header(skb, seqhi);
659 
660 	sg_init_table(sg, nfrags);
661 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
662 	if (unlikely(ret < 0)) {
663 		kfree(tmp);
664 		goto out;
665 	}
666 
667 	skb->ip_summed = CHECKSUM_NONE;
668 
669 	if ((x->props.flags & XFRM_STATE_ESN))
670 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
671 	else
672 		aead_request_set_callback(req, 0, esp_input_done, skb);
673 
674 	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
675 	aead_request_set_ad(req, assoclen);
676 
677 	ret = crypto_aead_decrypt(req);
678 	if (ret == -EINPROGRESS)
679 		goto out;
680 
681 	if ((x->props.flags & XFRM_STATE_ESN))
682 		esp_input_restore_header(skb);
683 
684 	ret = esp6_input_done2(skb, ret);
685 
686 out:
687 	return ret;
688 }
689 
690 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
691 {
692 	struct crypto_aead *aead = x->data;
693 	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
694 	unsigned int net_adj;
695 
696 	if (x->props.mode != XFRM_MODE_TUNNEL)
697 		net_adj = sizeof(struct ipv6hdr);
698 	else
699 		net_adj = 0;
700 
701 	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
702 		 net_adj) & ~(blksize - 1)) + net_adj - 2;
703 }
704 
705 static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
706 		    u8 type, u8 code, int offset, __be32 info)
707 {
708 	struct net *net = dev_net(skb->dev);
709 	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
710 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
711 	struct xfrm_state *x;
712 
713 	if (type != ICMPV6_PKT_TOOBIG &&
714 	    type != NDISC_REDIRECT)
715 		return 0;
716 
717 	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
718 			      esph->spi, IPPROTO_ESP, AF_INET6);
719 	if (!x)
720 		return 0;
721 
722 	if (type == NDISC_REDIRECT)
723 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
724 			     sock_net_uid(net, NULL));
725 	else
726 		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
727 	xfrm_state_put(x);
728 
729 	return 0;
730 }
731 
732 static void esp6_destroy(struct xfrm_state *x)
733 {
734 	struct crypto_aead *aead = x->data;
735 
736 	if (!aead)
737 		return;
738 
739 	crypto_free_aead(aead);
740 }
741 
742 static int esp_init_aead(struct xfrm_state *x)
743 {
744 	char aead_name[CRYPTO_MAX_ALG_NAME];
745 	struct crypto_aead *aead;
746 	int err;
747 
748 	err = -ENAMETOOLONG;
749 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
750 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
751 		goto error;
752 
753 	aead = crypto_alloc_aead(aead_name, 0, 0);
754 	err = PTR_ERR(aead);
755 	if (IS_ERR(aead))
756 		goto error;
757 
758 	x->data = aead;
759 
760 	err = crypto_aead_setkey(aead, x->aead->alg_key,
761 				 (x->aead->alg_key_len + 7) / 8);
762 	if (err)
763 		goto error;
764 
765 	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
766 	if (err)
767 		goto error;
768 
769 error:
770 	return err;
771 }
772 
773 static int esp_init_authenc(struct xfrm_state *x)
774 {
775 	struct crypto_aead *aead;
776 	struct crypto_authenc_key_param *param;
777 	struct rtattr *rta;
778 	char *key;
779 	char *p;
780 	char authenc_name[CRYPTO_MAX_ALG_NAME];
781 	unsigned int keylen;
782 	int err;
783 
784 	err = -EINVAL;
785 	if (!x->ealg)
786 		goto error;
787 
788 	err = -ENAMETOOLONG;
789 
790 	if ((x->props.flags & XFRM_STATE_ESN)) {
791 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
792 			     "%s%sauthencesn(%s,%s)%s",
793 			     x->geniv ?: "", x->geniv ? "(" : "",
794 			     x->aalg ? x->aalg->alg_name : "digest_null",
795 			     x->ealg->alg_name,
796 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
797 			goto error;
798 	} else {
799 		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
800 			     "%s%sauthenc(%s,%s)%s",
801 			     x->geniv ?: "", x->geniv ? "(" : "",
802 			     x->aalg ? x->aalg->alg_name : "digest_null",
803 			     x->ealg->alg_name,
804 			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
805 			goto error;
806 	}
807 
808 	aead = crypto_alloc_aead(authenc_name, 0, 0);
809 	err = PTR_ERR(aead);
810 	if (IS_ERR(aead))
811 		goto error;
812 
813 	x->data = aead;
814 
815 	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
816 		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
817 	err = -ENOMEM;
818 	key = kmalloc(keylen, GFP_KERNEL);
819 	if (!key)
820 		goto error;
821 
822 	p = key;
823 	rta = (void *)p;
824 	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
825 	rta->rta_len = RTA_LENGTH(sizeof(*param));
826 	param = RTA_DATA(rta);
827 	p += RTA_SPACE(sizeof(*param));
828 
829 	if (x->aalg) {
830 		struct xfrm_algo_desc *aalg_desc;
831 
832 		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
833 		p += (x->aalg->alg_key_len + 7) / 8;
834 
835 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
836 		BUG_ON(!aalg_desc);
837 
838 		err = -EINVAL;
839 		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
840 		    crypto_aead_authsize(aead)) {
841 			pr_info("ESP: %s digestsize %u != %hu\n",
842 				x->aalg->alg_name,
843 				crypto_aead_authsize(aead),
844 				aalg_desc->uinfo.auth.icv_fullbits / 8);
845 			goto free_key;
846 		}
847 
848 		err = crypto_aead_setauthsize(
849 			aead, x->aalg->alg_trunc_len / 8);
850 		if (err)
851 			goto free_key;
852 	}
853 
854 	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
855 	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
856 
857 	err = crypto_aead_setkey(aead, key, keylen);
858 
859 free_key:
860 	kfree(key);
861 
862 error:
863 	return err;
864 }
865 
866 static int esp6_init_state(struct xfrm_state *x)
867 {
868 	struct crypto_aead *aead;
869 	u32 align;
870 	int err;
871 
872 	if (x->encap)
873 		return -EINVAL;
874 
875 	x->data = NULL;
876 
877 	if (x->aead)
878 		err = esp_init_aead(x);
879 	else
880 		err = esp_init_authenc(x);
881 
882 	if (err)
883 		goto error;
884 
885 	aead = x->data;
886 
887 	x->props.header_len = sizeof(struct ip_esp_hdr) +
888 			      crypto_aead_ivsize(aead);
889 	switch (x->props.mode) {
890 	case XFRM_MODE_BEET:
891 		if (x->sel.family != AF_INET6)
892 			x->props.header_len += IPV4_BEET_PHMAXLEN +
893 					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
894 		break;
895 	default:
896 	case XFRM_MODE_TRANSPORT:
897 		break;
898 	case XFRM_MODE_TUNNEL:
899 		x->props.header_len += sizeof(struct ipv6hdr);
900 		break;
901 	}
902 
903 	align = ALIGN(crypto_aead_blocksize(aead), 4);
904 	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
905 
906 error:
907 	return err;
908 }
909 
910 static int esp6_rcv_cb(struct sk_buff *skb, int err)
911 {
912 	return 0;
913 }
914 
915 static const struct xfrm_type esp6_type = {
916 	.description	= "ESP6",
917 	.owner		= THIS_MODULE,
918 	.proto		= IPPROTO_ESP,
919 	.flags		= XFRM_TYPE_REPLAY_PROT,
920 	.init_state	= esp6_init_state,
921 	.destructor	= esp6_destroy,
922 	.get_mtu	= esp6_get_mtu,
923 	.input		= esp6_input,
924 	.output		= esp6_output,
925 	.hdr_offset	= xfrm6_find_1stfragopt,
926 };
927 
928 static struct xfrm6_protocol esp6_protocol = {
929 	.handler	=	xfrm6_rcv,
930 	.cb_handler	=	esp6_rcv_cb,
931 	.err_handler	=	esp6_err,
932 	.priority	=	0,
933 };
934 
935 static int __init esp6_init(void)
936 {
937 	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
938 		pr_info("%s: can't add xfrm type\n", __func__);
939 		return -EAGAIN;
940 	}
941 	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
942 		pr_info("%s: can't add protocol\n", __func__);
943 		xfrm_unregister_type(&esp6_type, AF_INET6);
944 		return -EAGAIN;
945 	}
946 
947 	return 0;
948 }
949 
950 static void __exit esp6_fini(void)
951 {
952 	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
953 		pr_info("%s: can't remove protocol\n", __func__);
954 	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
955 		pr_info("%s: can't remove xfrm type\n", __func__);
956 }
957 
958 module_init(esp6_init);
959 module_exit(esp6_fini);
960 
961 MODULE_LICENSE("GPL");
962 MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
963