/* linux/net/ipv4/esp4.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478) */
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

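/*
 * Per-packet scratch state kept in skb->cb.  The generic xfrm control
 * block must stay first so that XFRM_SKB_CB() keeps working; tmp points
 * at the esp_alloc_tmp() buffer so it can be freed when the (possibly
 * asynchronous) crypto request completes.
 */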
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
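/*
 * Resulting buffer layout (alignment gaps between regions omitted):
 *
 *   tmp -> | seqhi | IV | givcrypt request + req ctx | SG[nfrags] |
 *
 * The esp_tmp_*() helpers below recompute each region's address from the
 * base pointer using the same alignment rules.
 */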
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

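/*
 * Helpers that recover the individual regions of an esp_alloc_tmp()
 * buffer, mirroring the layout computed above.
 */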
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

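/* Completion callback for async encryption: release the scratch buffer
 * and let the xfrm layer resume output processing.
 */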
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

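/*
 * Transform an outgoing packet: append the self-describing ESP trailer
 * (TFC pad, monotonic pad bytes, pad length, next header), build the ESP
 * header and optional UDP encapsulation, then feed everything to the
 * AEAD transform via scatterlists.
 */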
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct esp_data *esp;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int tfclen;
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	int i;
	__be32 *seqhi;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	esp = x->data;
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto error;

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;

	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	for (i = 0; i < plen - 2; i++)
		tail[i] = i + 1;
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

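/*
 * Second half of input processing, shared by the sync and async paths:
 * validate the decrypted trailer, handle NAT-T peer address changes,
 * strip the ESP header plus padding, and hand back the inner protocol
 * number (or a negative error).
 */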
static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra policy check here,
			 * to see if we should allow or reject a packet
			 * from a different source address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: discard dummy packets (next header 59, IPPROTO_NONE).
	 * Returning -EINVAL makes the caller drop the skb without
	 * signalling any error back to the sender.
	 */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

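/* Completion callback for async decryption: finish trailer processing
 * and let the xfrm layer resume input processing.
 */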
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int assoclen;
	int sglists;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	if (elen <= 0)
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get the IV. This can be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	err = esp_input_done2(skb, err);

out:
	return err;
}

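/*
 * Largest payload that fits in @mtu once ESP overhead is accounted for.
 * Illustrative example (values not taken from the code): transport mode
 * with a 16-byte block cipher and a 12-byte ICV has header_len = 8 (ESP)
 * + 16 (IV), so for mtu = 1500:
 *   ((1500 - 24 - 12 - 20) & ~15) + (20 - 2) = 1440 + 18 = 1458 bytes.
 */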
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
		 net_adj) & ~(align - 1)) + (net_adj - 2);
}

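/*
 * ICMP error handler: for "fragmentation needed" and redirect messages
 * that quote one of our ESP packets, look up the SA by destination
 * address and SPI and update the cached PMTU or route accordingly.
 */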
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);
}

static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

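/* Set up a native AEAD transform (e.g. an rfc4106 GCM variant) described
 * by x->aead.
 */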
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

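/*
 * Wrap separate encryption and authentication algorithms in an
 * authenc/authencesn AEAD.  The composite key blob handed to
 * crypto_aead_setkey() is laid out as:
 *
 *   | rtattr: CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) | auth key | enc key |
 */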
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

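/*
 * Initialize ESP state: pick the AEAD transform, then size the
 * per-packet header (ESP header + IV, plus any tunnel, BEET or
 * UDP-encapsulation overhead) and trailer (padding, pad length,
 * next header and ICV).
 */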
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			/* Unknown encap type: fail state setup instead of
			 * silently returning success.
			 */
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

static const struct xfrm_type esp_type = {
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};

static const struct net_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.err_handler	=	esp4_err,
	.no_policy	=	1,
	.netns_ok	=	1,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);