xref: /linux/crypto/crypto_user.c (revision 74ce1896c6c65b2f8cccbf59162d542988835835)
/*
 * Crypto user configuration API.
 *
 * Copyright (C) 2011 secunet Security Networks AG
 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>

#include "internal.h"

#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))

static DEFINE_MUTEX(crypto_cfg_mutex);

/* The crypto netlink socket */
static struct sock *crypto_nlsk;

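/*
 * State for building CRYPTO_MSG_GETALG replies: the request skb (for the
 * sender's portid), the reply skb, and the sequence number and flags to
 * stamp on each emitted message.
 */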
struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

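/*
 * Look up a registered algorithm by the names in a crypto_user_alg.  A
 * non-empty cru_driver_name must match cra_driver_name exactly; otherwise,
 * unless @exact is set, the first algorithm whose cra_name matches cru_name
 * is taken.  cru_type/cru_mask are checked against cra_flags first.  On
 * success a module reference is held via crypto_mod_get() and the caller
 * must drop it with crypto_mod_put().
 */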
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (!match)
			continue;

		if (unlikely(!crypto_mod_get(q)))
			continue;

		alg = q;
		break;
	}

	up_read(&crypto_alg_sem);

	return alg;
}

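/*
 * The crypto_report_*() helpers below each fill in a fixed-size report
 * structure for one algorithm type and attach it to the reply skb as the
 * corresponding CRYPTOCFGA_REPORT_* netlink attribute.
 */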
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		    sizeof(struct crypto_report_cipher), &rcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rcomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_acomp racomp;

	strlcpy(racomp.type, "acomp", sizeof(racomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
		    sizeof(struct crypto_report_acomp), &racomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_akcipher rakcipher;

	strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
		    sizeof(struct crypto_report_akcipher), &rakcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_kpp rkpp;

	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
		    sizeof(struct crypto_report_kpp), &rkpp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

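/*
 * Fill in the crypto_user_alg payload for one algorithm and append the
 * priority plus a type-specific report attribute.  Larval (still
 * registering) algorithms only get a "larval" report; algorithm types that
 * provide a ->report() callback use it instead of the legacy switch on
 * cra_flags below.
 */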
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
	strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));

	ualg->cru_type = 0;
	ualg->cru_mask = 0;
	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		strlcpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
			    sizeof(struct crypto_report_larval), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_ACOMPRESS:
		if (crypto_report_acomp(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_AKCIPHER:
		if (crypto_report_akcipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_KPP:
		if (crypto_report_kpp(skb, alg))
			goto nla_put_failure;
		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

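/*
 * Emit one CRYPTO_MSG_GETALG message for @alg into info->out_skb, addressed
 * with the portid of the original request and the sequence number and flags
 * recorded in @info.
 */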
static int crypto_report_alg(struct crypto_alg *alg,
			     struct crypto_dump_info *info)
{
	struct sk_buff *in_skb = info->in_skb;
	struct sk_buff *skb = info->out_skb;
	struct nlmsghdr *nlh;
	struct crypto_user_alg *ualg;
	int err = 0;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out;
	}

	ualg = nlmsg_data(nlh);

	err = crypto_report_one(alg, ualg, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

out:
	return err;
}

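/*
 * ->doit handler for CRYPTO_MSG_GETALG without NLM_F_DUMP: look up a single
 * algorithm and unicast one reply message back to the sender.
 */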
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 0);
	if (!alg)
		return -ENOENT;

	err = -ENOMEM;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		goto drop_alg;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);

drop_alg:
	crypto_mod_put(alg);

	if (err)
		return err;

	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}

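/*
 * Netlink dump callback for CRYPTO_MSG_GETALG: walk crypto_alg_list and emit
 * one NLM_F_MULTI message per algorithm.  Everything is dumped in a single
 * pass; cb->args[0] only records that the dump has already run.
 */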
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct crypto_alg *alg;
	struct crypto_dump_info info;
	int err;

	if (cb->args[0])
		goto out;

	cb->args[0] = 1;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		err = crypto_report_alg(alg, &info);
		if (err)
			goto out_err;
	}

out:
	return skb->len;
out_err:
	return err;
}

static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}

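/*
 * Handle CRYPTO_MSG_UPDATEALG: requires CAP_NET_ADMIN and an exact
 * driver-name match.  The algorithm's spawns are removed, so instances built
 * on top of it are dropped and later re-created with the new priority taken
 * into account, and cra_priority is updated if a CRYPTOCFGA_PRIORITY_VAL
 * attribute was supplied.
 */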
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);
	crypto_remove_final(&list);

	return 0;
}

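/*
 * Handle CRYPTO_MSG_DELALG: requires CAP_NET_ADMIN and an exact driver-name
 * match.  Only template instances that are no longer in use can be
 * unregistered; see the comment below.
 */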
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	int err;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We cannot unregister core algorithms such as aes-generic.
	 * We would lose the reference in crypto_alg_list to this algorithm
	 * if we tried to unregister it.  Unregistering such an algorithm
	 * without removing the module is not possible, so we restrict
	 * ourselves to crypto instances that are built from templates. */
	err = -EINVAL;
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		goto drop_alg;

	err = -EBUSY;
	if (atomic_read(&alg->cra_refcnt) > 2)
		goto drop_alg;

	err = crypto_unregister_instance((struct crypto_instance *)alg);

drop_alg:
	crypto_mod_put(alg);
	return err;
}

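/*
 * Handle CRYPTO_MSG_NEWALG: requires CAP_NET_ADMIN.  If no matching
 * algorithm is registered yet, crypto_alg_mod_lookup() is used to load a
 * module or instantiate a template for the requested name, and the priority
 * is updated if one was supplied.
 */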
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	if (priority && !exact)
		return -EINVAL;

	alg = crypto_alg_match(p, exact);
	if (alg) {
		crypto_mod_put(alg);
		return -EEXIST;
	}

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);

	return 0;
}

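/*
 * Handle CRYPTO_MSG_DELRNG: requires CAP_NET_ADMIN and hands off to the rng
 * core to release the system default RNG instance.
 */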
static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	return crypto_del_default_rng();
}

#define MSGSIZE(type) sizeof(struct type)

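/* Minimum payload length (netlink header excluded) for each message type. */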
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = 0,
};

static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
	[CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
};

#undef MSGSIZE

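/*
 * Per-message-type handlers: ->doit serves plain requests, ->dump/->done
 * serve CRYPTO_MSG_GETALG requests carrying NLM_F_DUMP.
 */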
static const struct crypto_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
						       .dump = crypto_dump_report,
						       .done = crypto_dump_report_done},
	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
};

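/*
 * Dispatch one netlink message: validate the type, start a dump for GETALG
 * requests carrying NLM_F_DUMP (sizing min_dump_alloc from the number of
 * registered algorithms), otherwise parse the attributes and call the
 * ->doit handler registered for the message type.
 */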
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		u16 dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		down_read(&crypto_alg_sem);
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = dump_alloc,
			};
			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}
		up_read(&crypto_alg_sem);

		return err;
	}

	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
			  crypto_policy, extack);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

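/* All configuration requests are serialised under crypto_cfg_mutex. */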
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}

static int __init crypto_user_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
	if (!crypto_nlsk)
		return -ENOMEM;

	return 0;
}

static void __exit crypto_user_exit(void)
{
	netlink_kernel_release(crypto_nlsk);
}

module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
MODULE_ALIAS("net-pf-16-proto-21");