xref: /linux/crypto/crypto_user.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 * Crypto user configuration API.
 *
 * Copyright (C) 2011 secunet Security Networks AG
 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <net/netlink.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

#include "internal.h"

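/*
 * Requests arrive over an AF_NETLINK socket of protocol NETLINK_CRYPTO.
 * As a rough illustration only (a userspace sketch, not part of this
 * module, assuming <linux/cryptouser.h> and the usual netlink headers),
 * a CRYPTO_MSG_GETALG query for one driver could look like this:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct crypto_user_alg alg;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.alg)),
 *		.n.nlmsg_type  = CRYPTO_MSG_GETALG,
 *		.n.nlmsg_flags = NLM_F_REQUEST,
 *	};
 *	struct sockaddr_nl nl = { .nl_family = AF_NETLINK };
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
 *
 *	strncpy(req.alg.cru_driver_name, "cbc(aes-generic)",
 *		sizeof(req.alg.cru_driver_name) - 1);
 *	sendto(fd, &req, req.n.nlmsg_len, 0,
 *	       (struct sockaddr *)&nl, sizeof(nl));
 *
 * The reply is a CRYPTO_MSG_GETALG message carrying a struct
 * crypto_user_alg plus CRYPTOCFGA_* attributes and is read back with
 * recv().  Note that crypto_user_rcv_msg() below requires CAP_NET_ADMIN,
 * and the driver name above is only an example.
 */
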
static DEFINE_MUTEX(crypto_cfg_mutex);

/* The crypto netlink socket */
static struct sock *crypto_nlsk;

struct crypto_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

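/*
 * Walk crypto_alg_list under crypto_alg_sem and return the first algorithm
 * whose flags match p->cru_type under p->cru_mask and whose driver name
 * (or, if @exact is not set, algorithm name) matches @p.  Returns NULL if
 * nothing matches; no reference is taken on the algorithm found.
 */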
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (match) {
			alg = q;
			break;
		}
	}

	up_read(&crypto_alg_sem);

	return alg;
}

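/* Emit a CRYPTOCFGA_REPORT_CIPHER attribute for a single-block cipher. */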
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	/* strncpy() zero-pads, so no uninitialized stack bytes reach userspace */
	strncpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		    sizeof(struct crypto_report_cipher), &rcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

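/* Emit a CRYPTOCFGA_REPORT_COMPRESS attribute for a compression algorithm. */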
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	strncpy(rcomp.type, "compression", sizeof(rcomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rcomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

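/*
 * Fill in the struct crypto_user_alg header for @alg and append the
 * type-specific report attribute: larvals get CRYPTOCFGA_REPORT_LARVAL,
 * types with a ->report() callback use it, and the remaining legacy
 * cipher/compression types are handled in the switch below.
 */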
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
	memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
	       sizeof(ualg->cru_driver_name));
	memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
	       CRYPTO_MAX_ALG_NAME);

	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		strncpy(rl.type, "larval", sizeof(rl.type));

		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
			    sizeof(struct crypto_report_larval), &rl))
			goto nla_put_failure;
		goto out;
	}

	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

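/* Wrap the report for one algorithm in a CRYPTO_MSG_GETALG netlink message. */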
static int crypto_report_alg(struct crypto_alg *alg,
			     struct crypto_dump_info *info)
{
	struct sk_buff *in_skb = info->in_skb;
	struct sk_buff *skb = info->out_skb;
	struct nlmsghdr *nlh;
	struct crypto_user_alg *ualg;
	int err = 0;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
	if (!nlh) {
		err = -EMSGSIZE;
		goto out;
	}

	ualg = nlmsg_data(nlh);

	err = crypto_report_one(alg, ualg, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

out:
	return err;
}

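/*
 * CRYPTO_MSG_GETALG without NLM_F_DUMP: look up a single algorithm by its
 * driver name and unicast the report back to the requesting socket.
 */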
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	/* cru_driver_name is an array, so test its contents, not its address */
	if (!p->cru_driver_name[0])
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}

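/*
 * CRYPTO_MSG_GETALG with NLM_F_DUMP: report every registered algorithm.
 * The whole list is emitted on the first pass; cb->args[0] marks the dump
 * as done so the follow-up callback just terminates it.
 */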
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct crypto_alg *alg;
	struct crypto_dump_info info;
	int err;

	if (cb->args[0])
		goto out;

	cb->args[0] = 1;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	/* Hold crypto_alg_sem so the list cannot change under the walk. */
	down_read(&crypto_alg_sem);

	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		err = crypto_report_alg(alg, &info);
		if (err)
			goto out_err;
	}

	up_read(&crypto_alg_sem);

out:
	return skb->len;
out_err:
	up_read(&crypto_alg_sem);
	return err;
}

static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}

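/*
 * CRYPTO_MSG_UPDATEALG: tear down the instances built on top of the
 * algorithm and, if CRYPTOCFGA_PRIORITY_VAL was supplied, update its
 * priority.  A driver name is required when a priority is given.
 */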
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_remove_final(&list);

	return 0;
}

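/*
 * CRYPTO_MSG_DELALG: unregister a template instance that is no longer in
 * use.  The restrictions are explained in the comment below.
 */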
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We cannot unregister core algorithms such as aes-generic.
	 * We would lose the reference in crypto_alg_list to this algorithm
	 * if we tried to unregister. Unregistering such an algorithm without
	 * removing the module is not possible, so we restrict ourselves to
	 * crypto instances that are built from templates. */
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		return -EINVAL;

	if (atomic_read(&alg->cra_refcnt) != 1)
		return -EBUSY;

	return crypto_unregister_instance(alg);
}

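/*
 * Resolve @name to an skcipher algorithm, retrying for as long as the
 * lookup returns -EAGAIN, unless a signal is pending (then -EINTR).
 */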
static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
						   u32 mask)
{
	int err;
	struct crypto_alg *alg;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		alg = crypto_lookup_skcipher(name, type, mask);
		if (!IS_ERR(alg))
			return alg;

		err = PTR_ERR(alg);
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}

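/*
 * Resolve @name to an AEAD algorithm, with the same -EAGAIN retry loop as
 * crypto_user_skcipher_alg() above.
 */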
static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
					       u32 mask)
{
	int err;
	struct crypto_alg *alg;

	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_AEAD;
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_MASK;

	for (;;) {
		alg = crypto_lookup_aead(name, type, mask);
		if (!IS_ERR(alg))
			return alg;

		err = PTR_ERR(alg);
		if (err != -EAGAIN)
			break;
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}

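/*
 * CRYPTO_MSG_NEWALG: instantiate an algorithm (typically a template
 * instance such as "cbc(aes)") by triggering a lookup for it, optionally
 * set its priority, and then drop the lookup reference again.
 */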
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (strlen(p->cru_driver_name))
		exact = 1;

	if (priority && !exact)
		return -EINVAL;

	alg = crypto_alg_match(p, exact);
	if (alg)
		return -EEXIST;

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
		break;
	case CRYPTO_ALG_TYPE_GIVCIPHER:
	case CRYPTO_ALG_TYPE_BLKCIPHER:
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
		break;
	default:
		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	}

	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	crypto_mod_put(alg);

	return 0;
}

#define MSGSIZE(type) sizeof(struct type)

static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};

static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
	[CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
};

#undef MSGSIZE

static struct crypto_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
						       .dump = crypto_dump_report,
						       .done = crypto_dump_report_done},
};

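/*
 * Validate and dispatch one netlink message: check the type range and
 * CAP_NET_ADMIN, start a dump for CRYPTO_MSG_GETALG + NLM_F_DUMP, and
 * otherwise parse the attributes and call the ->doit() handler.
 */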
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct crypto_alg *alg;
		u16 dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = dump_alloc,
			};
			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
			  crypto_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

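/* Netlink input callback; serialize all requests with crypto_cfg_mutex. */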
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}

static int __init crypto_user_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
	if (!crypto_nlsk)
		return -ENOMEM;

	return 0;
}

static void __exit crypto_user_exit(void)
{
	netlink_kernel_release(crypto_nlsk);
}

module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Crypto userspace configuration API");