/* net/ipv4/esp4.c */
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
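/*
 * Rough layout of the returned buffer (a sketch; the exact offsets
 * depend on the IV size and alignment mask of the algorithm in use):
 *
 *   [ extra (ESN seqhi) | IV | struct aead_request + tfm ctx | sg[nfrags] ]
 *
 * The esp_tmp_*() helpers below recompute each offset from the base
 * pointer instead of storing it.
 */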
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	void *tmp;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	struct xfrm_state *x = skb_dst(skb)->xfrm;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
	}

	esph->spi = x->id.spi;

	return esph;
}
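/*
 * The ESN shuffle above, illustrated with one 4-byte word per slot
 * (a sketch, not a packet trace):
 *
 *   on the wire:       ... | spi    | seq_lo | payload ...
 *   while encrypting:  ... | spi | seq_hi | seq_lo | payload ...
 *
 * The header is temporarily moved back four bytes so that the SPI and
 * both sequence-number words are contiguous for authentication;
 * esp_output_restore_header() undoes the shift once encryption is done.
 */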
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}
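/*
 * Resulting trailer for, e.g., plen = 4 and proto = IPPROTO_TCP
 * (illustrative values, not taken from a trace):
 *
 *   [ tfclen zero bytes | 01 02 | pad length = 02 | next header = 06 ]
 *
 * The 01 02 ... series is the default self-describing padding from
 * RFC 4303, section 2.4.
 */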
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct esp_output_extra *extra;
	int err = -ENOMEM;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct sk_buff *trailer;
	struct page *page;
	void *tmp;
	u8 *iv;
	u8 *tail;
	u8 *vaddr;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int extralen;
	int tailen;
	__be64 seqno;
	__u8 proto = *skb_mac_header(skb);

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;
	tailen = tfclen + plen + alen;
	assoclen = sizeof(*esph);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}
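	/*
	 * Example with hypothetical numbers: a 16-byte block cipher,
	 * skb->len = 100 and tfclen = 0 give clen = ALIGN(102, 16) = 112,
	 * hence plen = 12 (ten pad bytes plus the two-byte pad-length/
	 * next-header trailer) and tailen = 12 + alen ICV bytes.
	 */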

	*skb_mac_header(skb) = IPPROTO_ESP;
	esph = ip_esp_hdr(skb);

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len + tailen
				- skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}
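	/*
	 * The resulting encapsulations (RFC 3948; the NON_IKE variant
	 * comes from early versions of the udp-encaps draft):
	 *
	 *   UDP_ENCAP_ESPINUDP:          IP | UDP | ESP | ...
	 *   UDP_ENCAP_ESPINUDP_NON_IKE:  IP | UDP | 8 zero bytes | ESP | ...
	 */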

	if (!skb_cloned(skb)) {
		if (tailen <= skb_availroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, tfclen, plen, proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk)
				atomic_add(tailen, &sk->sk_wmem_alloc);

			skb_push(skb, -skb_network_offset(skb));

			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
			esph->spi = x->id.spi;

			tmp = esp_alloc_tmp(aead, nfrags + 2, extralen);
			if (!tmp) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}

			extra = esp_tmp_extra(tmp);
			iv = esp_tmp_iv(aead, tmp, extralen);
			req = esp_tmp_req(aead, iv);
			sg = esp_req_sg(aead, req);
			dsg = &sg[nfrags];

			esph = esp_output_set_extra(skb, esph, extra);

			sg_init_table(sg, nfrags);
			skb_to_sgvec(skb, sg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				err = -ENOMEM;
				goto error;
			}

			skb_shinfo(skb)->nr_frags = 1;

			page = pfrag->page;
			get_page(page);
			/* replace page frags in skb with new page */
			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
			pfrag->offset = pfrag->offset + allocsize;

			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
			skb_to_sgvec(skb, dsg,
				     (unsigned char *)esph - skb->data,
				     assoclen + ivlen + clen + alen);

			spin_unlock_bh(&x->lock);

			goto skip_cow2;
		}
	}

cow:
	err = skb_cow_data(skb, tailen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;
	tail = skb_tail_pointer(trailer);
	esph = ip_esp_hdr(skb);

skip_cow:
	esp_output_fill_trailer(tail, tfclen, plen, proto);

	pskb_put(skb, trailer, clen - skb->len + alen);
	skb_push(skb, -skb_network_offset(skb));
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esph->spi = x->id.spi;

	tmp = esp_alloc_tmp(aead, nfrags, extralen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);
	dsg = sg;

	esph = esp_output_set_extra(skb, esph, extra);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

skip_cow2:
	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));
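	/*
	 * The low-order min(ivlen, 8) bytes of the IV now hold the
	 * big-endian 64-bit sequence number; IV generators such as seqiv
	 * or echainiv (selected through x->geniv) derive the per-packet
	 * IV from this seed.
	 */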

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);
	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
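/*
 * This mirrors the output-side ESN shuffle: the header grows four bytes
 * at the front so that spi, seq_hi and seq_lo sit contiguously for
 * authentication; esp_input_restore_header() trims it back after
 * decryption.
 */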

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
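/*
 * Worked example with hypothetical numbers: tunnel mode with an AEAD
 * using an 8-byte IV and a 16-byte ICV on a 1500-byte link gives
 * header_len = 20 + 8 + 8 = 36 and blksize = 4 (AEAD block size 1), so
 * ((1500 - 36 - 16) & ~3) + 0 - 2 = 1446 bytes of payload MTU.
 */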

static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
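		/* fall through */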
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

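	/*
	 * authenc(...) expects its key as an rtattr-framed blob:
	 *
	 *   [ rtattr: enckeylen param | authentication key | encryption key ]
	 *
	 * which is assembled below.
	 */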
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type = {
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);