/* net/ipv4/ah4.c (revision b85d45947951d23cb22d90caecf4c1eb81342c96) */
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

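/*
 * Scratch buffer used while an ahash request is in flight.  A single
 * kmalloc'd block holds, in order: the caller's "size" region (a copy
 * of the IP header, plus the received ICV and/or ESN high bits when
 * needed), the computed ICV, the ahash_request, and a scatterlist with
 * one entry per skb fragment.  The ah_tmp_*() and ah_req_sg() helpers
 * below return suitably aligned pointers into this block.
 */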
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
                          unsigned int size)
{
        unsigned int len;

        len = size + crypto_ahash_digestsize(ahash) +
              (crypto_ahash_alignmask(ahash) &
               ~(crypto_tfm_ctx_alignment() - 1));

        len = ALIGN(len, crypto_tfm_ctx_alignment());

        len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
        return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
                             unsigned int offset)
{
        return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
                                               u8 *icv)
{
        struct ahash_request *req;

        req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
                                crypto_tfm_ctx_alignment());

        ahash_request_set_tfm(req, ahash);

        return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
                                            struct ahash_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_ahash_reqsize(ahash),
                             __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

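/*
 * Per RFC 4302, fields that may legitimately change in transit must be
 * zeroed before the ICV is computed.  Options treated as immutable
 * (IPOPT_SEC, extended security, CIPSO, Router Alert, the RFC 1770
 * option) are left as-is; everything else is zeroed.  For source
 * routing options the final destination is what the receiver will see,
 * so it is copied out for the caller to substitute into the header.
 */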
static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
        unsigned char *optptr = (unsigned char *)(iph+1);
        int  l = iph->ihl*4 - sizeof(struct iphdr);
        int  optlen;

        while (l > 0) {
                switch (*optptr) {
                case IPOPT_END:
                        return 0;
                case IPOPT_NOOP:
                        l--;
                        optptr++;
                        continue;
                }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l)
                        return -EINVAL;
                switch (*optptr) {
                case IPOPT_SEC:
                case 0x85:      /* Some "Extended Security" crap. */
                case IPOPT_CIPSO:
                case IPOPT_RA:
                case 0x80|21:   /* RFC1770 */
                        break;
                case IPOPT_LSRR:
                case IPOPT_SSRR:
                        if (optlen < 6)
                                return -EINVAL;
                        memcpy(daddr, optptr+optlen-4, 4);
                        /* Fall through */
                default:
                        memset(optptr, 0, optlen);
                }
                l -= optlen;
                optptr += optlen;
        }
        return 0;
}

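/*
 * Completion callback for an asynchronous digest on the output path.
 * The hash core invokes this once the ICV is ready: copy it into the
 * AH header, restore the mutable IPv4 fields (and options) that were
 * saved in the scratch buffer, free the buffer and hand the skb back
 * to the xfrm output path.
 */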
static void ah_output_done(struct crypto_async_request *base, int err)
{
        u8 *icv;
        struct iphdr *iph;
        struct sk_buff *skb = base->data;
        struct xfrm_state *x = skb_dst(skb)->xfrm;
        struct ah_data *ahp = x->data;
        struct iphdr *top_iph = ip_hdr(skb);
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        int ihl = ip_hdrlen(skb);

        iph = AH_SKB_CB(skb)->tmp;
        icv = ah_tmp_icv(ahp->ahash, iph, ihl);
        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

        top_iph->tos = iph->tos;
        top_iph->ttl = iph->ttl;
        top_iph->frag_off = iph->frag_off;
        if (top_iph->ihl != 5) {
                top_iph->daddr = iph->daddr;
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }

        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

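/*
 * Transform output: insert the AH header, save and zero the mutable
 * IPv4 fields (TOS, TTL, frag_off, checksum and mutable options) as
 * required for ICV calculation, then digest the whole packet.  With
 * ESN enabled the upper 32 bits of the sequence number are appended to
 * the scatterlist so they are authenticated without being sent on the
 * wire.  If the hash completes synchronously the saved fields are
 * restored here; otherwise ah_output_done() finishes the job.
 */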
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        int nfrags;
        int ihl;
        u8 *icv;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct iphdr *iph, *top_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        ahp = x->data;
        ahash = ahp->ahash;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        skb_push(skb, -skb_network_offset(skb));
        ah = ip_auth_hdr(skb);
        ihl = ip_hdrlen(skb);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }
        err = -ENOMEM;
        iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
        if (!iph)
                goto out;
        seqhi = (__be32 *)((char *)iph + ihl);
        icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        top_iph = ip_hdr(skb);

        iph->tos = top_iph->tos;
        iph->ttl = top_iph->ttl;
        iph->frag_off = top_iph->frag_off;

        if (top_iph->ihl != 5) {
                iph->daddr = top_iph->daddr;
                memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
                err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
                if (err)
                        goto out_free;
        }

        ah->nexthdr = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_AH;

        top_iph->tos = 0;
        top_iph->tot_len = htons(skb->len);
        top_iph->frag_off = 0;
        top_iph->ttl = 0;
        top_iph->check = 0;

        if (x->props.flags & XFRM_STATE_ALIGN4)
                ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
        else
                ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

        ah->reserved = 0;
        ah->spi = x->id.spi;
        ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags + sglists);
        skb_to_sgvec_nomark(skb, sg, 0, skb->len);

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }
        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_output_done, skb);

        AH_SKB_CB(skb)->tmp = iph;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                if (err == -EBUSY)
                        err = NET_XMIT_DROP;
                goto out_free;
        }

        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

        top_iph->tos = iph->tos;
        top_iph->ttl = iph->ttl;
        top_iph->frag_off = iph->frag_off;
        if (top_iph->ihl != 5) {
                top_iph->daddr = iph->daddr;
                memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        }

out_free:
        kfree(iph);
out:
        return err;
}

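/*
 * Completion callback for an asynchronous digest on the input path.
 * Compare the freshly computed ICV against the one received in the AH
 * header (saved in the scratch buffer); on success strip the AH header
 * by restoring the original IP header in front of the inner payload,
 * then resume xfrm input processing.
 */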
static void ah_input_done(struct crypto_async_request *base, int err)
{
        u8 *auth_data;
        u8 *icv;
        struct iphdr *work_iph;
        struct sk_buff *skb = base->data;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ah_data *ahp = x->data;
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        int ihl = ip_hdrlen(skb);
        int ah_hlen = (ah->hdrlen + 2) << 2;

        work_iph = AH_SKB_CB(skb)->tmp;
        auth_data = ah_tmp_auth(work_iph, ihl);
        icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

        err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out;

        err = ah->nexthdr;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);

        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);
out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
}

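/*
 * Transform input: validate the AH header length against the
 * negotiated ICV size, copy the received ICV and the original IP
 * header into the scratch buffer, zero the mutable fields and options,
 * and digest the packet (plus the ESN high bits when enabled).  On a
 * synchronous completion the ICV is checked and the AH header removed
 * here; otherwise ah_input_done() takes over.  Returns the next
 * protocol number on success or a negative error code.
 */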
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
        int ah_hlen;
        int ihl;
        int nexthdr;
        int nfrags;
        u8 *auth_data;
        u8 *icv;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct iphdr *iph, *work_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        int err = -ENOMEM;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        if (!pskb_may_pull(skb, sizeof(*ah)))
                goto out;

        ah = (struct ip_auth_hdr *)skb->data;
        ahp = x->data;
        ahash = ahp->ahash;

        nexthdr = ah->nexthdr;
        ah_hlen = (ah->hdrlen + 2) << 2;

        if (x->props.flags & XFRM_STATE_ALIGN4) {
                if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
                    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
                        goto out;
        } else {
                if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
                    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
                        goto out;
        }

        if (!pskb_may_pull(skb, ah_hlen))
                goto out;

        /* We are going to _remove_ AH header to keep sockets happy,
         * so... Later this can change. */
        if (skb_unclone(skb, GFP_ATOMIC))
                goto out;

        skb->ip_summed = CHECKSUM_NONE;

        if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
                goto out;
        nfrags = err;

        ah = (struct ip_auth_hdr *)skb->data;
        iph = ip_hdr(skb);
        ihl = ip_hdrlen(skb);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }

        work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
                                ahp->icv_trunc_len + seqhi_len);
        if (!work_iph) {
                err = -ENOMEM;
                goto out;
        }

        seqhi = (__be32 *)((char *)work_iph + ihl);
        auth_data = ah_tmp_auth(seqhi, seqhi_len);
        icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        memcpy(work_iph, iph, ihl);
        memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        iph->ttl = 0;
        iph->tos = 0;
        iph->frag_off = 0;
        iph->check = 0;
        if (ihl > sizeof(*iph)) {
                __be32 dummy;
                err = ip_clear_mutable_options(iph, &dummy);
                if (err)
                        goto out_free;
        }

        skb_push(skb, ihl);

        sg_init_table(sg, nfrags + sglists);
        skb_to_sgvec_nomark(skb, sg, 0, skb->len);

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }
        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_input_done, skb);

        AH_SKB_CB(skb)->tmp = work_iph;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                goto out_free;
        }

        err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out_free;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        err = nexthdr;

out_free:
        kfree(work_iph);
out:
        return err;
}

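/*
 * ICMP error handler: react to "fragmentation needed" and redirect
 * messages that quote an AH packet by updating the path MTU or the
 * route for the matching SA; all other ICMP errors are ignored.
 */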
static int ah4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              ah->spi, IPPROTO_AH, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
        xfrm_state_put(x);

        return 0;
}

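/*
 * SA initialisation: allocate the ahash transform named by the SA's
 * authentication algorithm, program its key, and cross-check the
 * digest size against the xfrm algorithm description before recording
 * the full and truncated ICV lengths.  The AH header length announced
 * via x->props.header_len is padded to a 32-bit (XFRM_STATE_ALIGN4) or
 * the default 64-bit boundary, plus an outer IP header in tunnel mode.
 */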
static int ah_init_state(struct xfrm_state *x)
{
        struct ah_data *ahp = NULL;
        struct xfrm_algo_desc *aalg_desc;
        struct crypto_ahash *ahash;

        if (!x->aalg)
                goto error;

        if (x->encap)
                goto error;

        ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
        if (!ahp)
                return -ENOMEM;

        ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
        if (IS_ERR(ahash))
                goto error;

        ahp->ahash = ahash;
        if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
                                (x->aalg->alg_key_len + 7) / 8))
                goto error;

        /*
         * Lookup the algorithm description maintained by xfrm_algo,
         * verify crypto transform properties, and store information
         * we need for AH processing.  This lookup cannot fail here
         * after a successful crypto_alloc_ahash().
         */
        aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
        BUG_ON(!aalg_desc);

        if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_ahash_digestsize(ahash)) {
                pr_info("%s: %s digestsize %u != %hu\n",
                        __func__, x->aalg->alg_name,
                        crypto_ahash_digestsize(ahash),
                        aalg_desc->uinfo.auth.icv_fullbits / 8);
                goto error;
        }

        ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
        ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

        if (x->props.flags & XFRM_STATE_ALIGN4)
                x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
                                                  ahp->icv_trunc_len);
        else
                x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
                                                  ahp->icv_trunc_len);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        x->data = ahp;

        return 0;

error:
        if (ahp) {
                crypto_free_ahash(ahp->ahash);
                kfree(ahp);
        }
        return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
        struct ah_data *ahp = x->data;

        if (!ahp)
                return;

        crypto_free_ahash(ahp->ahash);
        kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

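/*
 * Glue for the xfrm framework: ah_type registers the AH transform for
 * AF_INET (flagged for replay protection), and ah4_protocol routes
 * IP protocol 51 (AH) packets and their ICMP errors into it.
 */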
static const struct xfrm_type ah_type =
{
        .description    = "AH4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_AH,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = ah_init_state,
        .destructor     = ah_destroy,
        .input          = ah_input,
        .output         = ah_output
};

static struct xfrm4_protocol ah4_protocol = {
        .handler        =       xfrm4_rcv,
        .input_handler  =       xfrm_input,
        .cb_handler     =       ah4_rcv_cb,
        .err_handler    =       ah4_err,
        .priority       =       0,
};

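/*
 * Module init/exit: register the xfrm type first and the protocol
 * handler second, and tear them down in the reverse order so that no
 * AH packet can reach an unregistered type.
 */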
static int __init ah4_init(void)
{
        if (xfrm_register_type(&ah_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ah_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit ah4_fini(void)
{
        if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);