xref: /linux/net/xfrm/xfrm_algo.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2  * xfrm algorithm interface
3  *
4  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the Free
8  * Software Foundation; either version 2 of the License, or (at your option)
9  * any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pfkeyv2.h>
15 #include <linux/crypto.h>
16 #include <net/xfrm.h>
17 #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
18 #include <net/ah.h>
19 #endif
20 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
21 #include <net/esp.h>
22 #endif
23 #include <asm/scatterlist.h>
24 
25 /*
26  * Algorithms supported by IPsec.  These entries contain properties which
27  * are used in key negotiation and xfrm processing, and are used to verify
28  * that instantiated crypto transforms have correct parameters for IPsec
29  * purposes.
30  */
/*
 * Authentication (integrity) algorithms usable by AH and ESP.
 * icv_truncbits is the number of digest bits actually carried on the
 * wire; icv_fullbits is the full digest size of the underlying hash,
 * which here also bounds the key size advertised via pfkey
 * (sadb_alg_minbits == sadb_alg_maxbits == icv_fullbits).
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	/* Null ICV: no authentication at all (debug/interop only). */
	.name = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	/* HMAC-MD5-96: 128-bit digest truncated to 96 bits on the wire. */
	.name = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	/* HMAC-SHA1-96: 160-bit digest truncated to 96 bits on the wire. */
	.name = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	/* HMAC-SHA2-256, truncated to 96 bits on the wire. */
	.name = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	/* HMAC-RIPEMD160, truncated to 96 bits on the wire. */
	.name = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
118 
119 static struct xfrm_algo_desc ealg_list[] = {
120 {
121 	.name = "cipher_null",
122 
123 	.uinfo = {
124 		.encr = {
125 			.blockbits = 8,
126 			.defkeybits = 0,
127 		}
128 	},
129 
130 	.desc = {
131 		.sadb_alg_id =	SADB_EALG_NULL,
132 		.sadb_alg_ivlen = 0,
133 		.sadb_alg_minbits = 0,
134 		.sadb_alg_maxbits = 0
135 	}
136 },
137 {
138 	.name = "des",
139 
140 	.uinfo = {
141 		.encr = {
142 			.blockbits = 64,
143 			.defkeybits = 64,
144 		}
145 	},
146 
147 	.desc = {
148 		.sadb_alg_id = SADB_EALG_DESCBC,
149 		.sadb_alg_ivlen = 8,
150 		.sadb_alg_minbits = 64,
151 		.sadb_alg_maxbits = 64
152 	}
153 },
154 {
155 	.name = "des3_ede",
156 
157 	.uinfo = {
158 		.encr = {
159 			.blockbits = 64,
160 			.defkeybits = 192,
161 		}
162 	},
163 
164 	.desc = {
165 		.sadb_alg_id = SADB_EALG_3DESCBC,
166 		.sadb_alg_ivlen = 8,
167 		.sadb_alg_minbits = 192,
168 		.sadb_alg_maxbits = 192
169 	}
170 },
171 {
172 	.name = "cast128",
173 
174 	.uinfo = {
175 		.encr = {
176 			.blockbits = 64,
177 			.defkeybits = 128,
178 		}
179 	},
180 
181 	.desc = {
182 		.sadb_alg_id = SADB_X_EALG_CASTCBC,
183 		.sadb_alg_ivlen = 8,
184 		.sadb_alg_minbits = 40,
185 		.sadb_alg_maxbits = 128
186 	}
187 },
188 {
189 	.name = "blowfish",
190 
191 	.uinfo = {
192 		.encr = {
193 			.blockbits = 64,
194 			.defkeybits = 128,
195 		}
196 	},
197 
198 	.desc = {
199 		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
200 		.sadb_alg_ivlen = 8,
201 		.sadb_alg_minbits = 40,
202 		.sadb_alg_maxbits = 448
203 	}
204 },
205 {
206 	.name = "aes",
207 
208 	.uinfo = {
209 		.encr = {
210 			.blockbits = 128,
211 			.defkeybits = 128,
212 		}
213 	},
214 
215 	.desc = {
216 		.sadb_alg_id = SADB_X_EALG_AESCBC,
217 		.sadb_alg_ivlen = 8,
218 		.sadb_alg_minbits = 128,
219 		.sadb_alg_maxbits = 256
220 	}
221 },
222 {
223         .name = "serpent",
224 
225         .uinfo = {
226                 .encr = {
227                         .blockbits = 128,
228                         .defkeybits = 128,
229                 }
230         },
231 
232         .desc = {
233                 .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
234                 .sadb_alg_ivlen = 8,
235                 .sadb_alg_minbits = 128,
236                 .sadb_alg_maxbits = 256,
237         }
238 },
239 {
240         .name = "twofish",
241 
242         .uinfo = {
243                 .encr = {
244                         .blockbits = 128,
245                         .defkeybits = 128,
246                 }
247         },
248 
249         .desc = {
250                 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
251                 .sadb_alg_ivlen = 8,
252                 .sadb_alg_minbits = 128,
253                 .sadb_alg_maxbits = 256
254         }
255 },
256 };
257 
/*
 * IPComp compression algorithms.  NOTE(review): 'threshold' presumably
 * controls when compression is considered worthwhile for a packet --
 * confirm the exact semantics against the IPComp implementation.
 */
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
287 
288 static inline int aalg_entries(void)
289 {
290 	return ARRAY_SIZE(aalg_list);
291 }
292 
293 static inline int ealg_entries(void)
294 {
295 	return ARRAY_SIZE(ealg_list);
296 }
297 
298 static inline int calg_entries(void)
299 {
300 	return ARRAY_SIZE(calg_list);
301 }
302 
303 /* Todo: generic iterators */
304 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
305 {
306 	int i;
307 
308 	for (i = 0; i < aalg_entries(); i++) {
309 		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
310 			if (aalg_list[i].available)
311 				return &aalg_list[i];
312 			else
313 				break;
314 		}
315 	}
316 	return NULL;
317 }
318 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
319 
320 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
321 {
322 	int i;
323 
324 	for (i = 0; i < ealg_entries(); i++) {
325 		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
326 			if (ealg_list[i].available)
327 				return &ealg_list[i];
328 			else
329 				break;
330 		}
331 	}
332 	return NULL;
333 }
334 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
335 
336 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
337 {
338 	int i;
339 
340 	for (i = 0; i < calg_entries(); i++) {
341 		if (calg_list[i].desc.sadb_alg_id == alg_id) {
342 			if (calg_list[i].available)
343 				return &calg_list[i];
344 			else
345 				break;
346 		}
347 	}
348 	return NULL;
349 }
350 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
351 
352 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
353 					      int entries, char *name,
354 					      int probe)
355 {
356 	int i, status;
357 
358 	if (!name)
359 		return NULL;
360 
361 	for (i = 0; i < entries; i++) {
362 		if (strcmp(name, list[i].name))
363 			continue;
364 
365 		if (list[i].available)
366 			return &list[i];
367 
368 		if (!probe)
369 			break;
370 
371 		status = crypto_alg_available(name, 0);
372 		if (!status)
373 			break;
374 
375 		list[i].available = status;
376 		return &list[i];
377 	}
378 	return NULL;
379 }
380 
381 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
382 {
383 	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
384 }
385 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
386 
387 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
388 {
389 	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
390 }
391 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
392 
393 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
394 {
395 	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
396 }
397 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
398 
399 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
400 {
401 	if (idx >= aalg_entries())
402 		return NULL;
403 
404 	return &aalg_list[idx];
405 }
406 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
407 
408 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
409 {
410 	if (idx >= ealg_entries())
411 		return NULL;
412 
413 	return &ealg_list[idx];
414 }
415 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
416 
/*
 * Probe every algorithm table for actual availability of the crypto
 * transforms and refresh each entry's 'available' flag.  Typically
 * called by pfkey during userspace SA add, update or register.  Must
 * not run in softirq context (BUG_ON enforces this).
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, avail;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		avail = crypto_alg_available(aalg_list[i].name, 0);
		/* Only store when the flag actually changes. */
		if (avail != aalg_list[i].available)
			aalg_list[i].available = avail;
	}

	for (i = 0; i < ealg_entries(); i++) {
		avail = crypto_alg_available(ealg_list[i].name, 0);
		if (avail != ealg_list[i].available)
			ealg_list[i].available = avail;
	}

	for (i = 0; i < calg_entries(); i++) {
		avail = crypto_alg_available(calg_list[i].name, 0);
		if (avail != calg_list[i].available)
			calg_list[i].available = avail;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
449 
450 int xfrm_count_auth_supported(void)
451 {
452 	int i, n;
453 
454 	for (i = 0, n = 0; i < aalg_entries(); i++)
455 		if (aalg_list[i].available)
456 			n++;
457 	return n;
458 }
459 EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
460 
461 int xfrm_count_enc_supported(void)
462 {
463 	int i, n;
464 
465 	for (i = 0, n = 0; i < ealg_entries(); i++)
466 		if (ealg_list[i].available)
467 			n++;
468 	return n;
469 }
470 EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
471 
472 /* Move to common area: it is shared with AH. */
473 
/*
 * Feed the bytes of @skb in the range [@offset, @offset + @len) into an
 * ICV (integrity check value) computation, one scatterlist segment at a
 * time, by calling @icv_update on @tfm for each segment.  Walks the
 * linear head first, then the page frags, then recurses into the
 * frag_list.  BUGs if the requested range runs past the end of the skb.
 */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Part (or all) of the range lives in the linear header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	/* Walk the paged fragments; 'start' tracks the skb-relative
	 * offset at which each fragment begins.
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	/* Finally recurse into any chained skbs on the frag_list. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	/* Any bytes left means the caller asked for more than the skb holds. */
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
545 
546 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
547 
/* Although generic-looking, this helper is not used anywhere else. */

/*
 * Map the bytes of @skb in the range [@offset, @offset + @len) onto the
 * scatterlist @sg: one entry for the linear head, one per page frag
 * touched, recursing into the frag_list.  Returns the number of
 * scatterlist entries filled; the caller must supply enough entries.
 * BUGs if the range runs past the end of the skb.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	/* Linear header portion of the range. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	/* Paged fragments; 'start' is the skb-relative offset of each. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	/* Chained skbs on the frag_list, handled recursively. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	/* Any bytes left means the caller asked for more than the skb holds. */
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
615 
/* Check that skb data bits are writable.  If they are not, copy the
 * data to a newly created private area.  If "tailbits" is given, make
 * sure that tailbits bytes beyond the current end of the skb are
 * writable.
 *
 * Returns the number of scatterlist elements needed for subsequent
 * transformations (or a negative errno), and stores a pointer to the
 * writable trailer skb in *trailer.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in troubles, going to mincer fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	/* Walk the frag_list, replacing any skb that is shared, cloned,
	 * paged, nested, or (for the last one) short on tailroom with a
	 * freshly allocated private copy.
	 */
	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			/* Preserve socket ownership for accounting. */
			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
716 
717 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
718 {
719 	if (tail != skb) {
720 		skb->data_len += len;
721 		skb->len += len;
722 	}
723 	return skb_put(tail, len);
724 }
725 EXPORT_SYMBOL_GPL(pskb_put);
726 #endif
727