xref: /linux/drivers/char/hw_random/core.c (revision a594533df0f6ca391da003f43d53b336a2d23ffa)
1 /*
2  * hw_random/core.c: HWRNG core API
3  *
4  * Copyright 2006 Michael Buesch <m@bues.ch>
5  * Copyright 2005 (c) MontaVista Software, Inc.
6  *
7  * Please read Documentation/admin-guide/hw_random.rst for details on use.
8  *
9  * This software may be used and distributed according to the terms
10  * of the GNU General Public License, incorporated herein by reference.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/random.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/sched/signal.h>
22 #include <linux/miscdevice.h>
23 #include <linux/module.h>
24 #include <linux/random.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/uaccess.h>
28 
#define RNG_MODULE_NAME		"hw_random"

/* The rng all readers currently draw from; NULL when none is selected. */
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
/* Kernel thread feeding hwrng output into the entropy pool (see hwrng_fillfn). */
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* Unread bytes still buffered in rng_buffer (guarded by reading_mutex). */
static int data_avail;
/* rng_buffer backs /dev/hwrng reads; rng_fillbuf backs the fill thread. */
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");

/* Forward declarations for routines used before their definitions. */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
59 
60 static size_t rng_buffer_size(void)
61 {
62 	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
63 }
64 
/*
 * Pull a few bytes from a newly selected rng and credit them to the
 * entropy pool.  Best effort: a failed or short read is silently ignored.
 */
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;

	/* reading_mutex guards rng_fillbuf and the driver read routines */
	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0) {
		/* quality is entropy per 1024 bits; scale to the bits read */
		size_t entropy = bytes_read * 8 * rng->quality / 1024;
		add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
	}
}
77 
/*
 * kref release callback: run the driver's optional cleanup hook, then
 * signal waiters (hwrng_unregister()) via cleanup_done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
87 
/*
 * Make @rng the device readers draw from.  Caller must hold rng_mutex.
 * Initializes (or re-references) @rng, drops the previous current rng,
 * and lazily starts the entropy fill thread.  Returns 0 or the negative
 * errno from the driver's init callback.
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			/* non-fatal: reads still work, only pool feeding stops */
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}
112 
/*
 * Release the core's reference on current_rng and clear it.  Caller must
 * hold rng_mutex; cleanup_rng() runs once the last reference is gone.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
123 
124 /* Returns ERR_PTR(), NULL or refcounted hwrng */
125 static struct hwrng *get_current_rng_nolock(void)
126 {
127 	if (current_rng)
128 		kref_get(&current_rng->ref);
129 
130 	return current_rng;
131 }
132 
/*
 * Take rng_mutex and grab a reference on the current rng.  Returns
 * ERR_PTR(-ERESTARTSYS) if interrupted while acquiring the lock, NULL
 * if no rng is selected, or a refcounted hwrng (release with put_rng()).
 */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}
145 
/* Drop a reference obtained via get_current_rng{,_nolock}(); NULL is ok. */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
157 
/*
 * Prepare @rng for use as current rng.  If it is already live (refcount
 * non-zero) just take another reference; otherwise run the driver's
 * optional init hook, reset the refcount to 1 and rearm cleanup_done.
 * In both cases (re)derive the quality estimate, clamped to the
 * 0..1024-per-1024-bits scale.  Called under rng_mutex.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* Already initialized and alive: just pin it. */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	if (!rng->quality)
		rng->quality = default_quality;
	if (rng->quality > 1024)
		rng->quality = 1024;
	current_quality = rng->quality; /* obsolete */

	return 0;
}
183 
184 static int rng_dev_open(struct inode *inode, struct file *filp)
185 {
186 	/* enforce read-only access to this chrdev */
187 	if ((filp->f_mode & FMODE_READ) == 0)
188 		return -EINVAL;
189 	if (filp->f_mode & FMODE_WRITE)
190 		return -EINVAL;
191 	return 0;
192 }
193 
194 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
195 			int wait) {
196 	int present;
197 
198 	BUG_ON(!mutex_is_locked(&reading_mutex));
199 	if (rng->read)
200 		return rng->read(rng, (void *)buffer, size, wait);
201 
202 	if (rng->data_present)
203 		present = rng->data_present(rng, wait);
204 	else
205 		present = 1;
206 
207 	if (present)
208 		return rng->data_read(rng, (u32 *)buffer);
209 
210 	return 0;
211 }
212 
/*
 * read() handler for /dev/hwrng.  Refills the shared rng_buffer from the
 * current rng as needed and copies buffered bytes to userspace, looping
 * until @size is satisfied, an error occurs, or (with O_NONBLOCK) no
 * data is available.  Returns the bytes copied so far, or a negative
 * errno if nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		/* Re-resolve each iteration: the current rng may change. */
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* Buffer drained: pull a fresh chunk from the driver. */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* Consume from the tail of the buffered data. */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* Report progress if any bytes were delivered, else the error. */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
289 
/* File operations for the read-only /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

/* Defined below via ATTRIBUTE_GROUPS(); forward-declared for rng_miscdev. */
static const struct attribute_group *rng_dev_groups[];

/* The /dev/hwrng misc device, carrying the sysfs attribute groups. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
306 
/*
 * Select the registered rng with the highest quality as current rng,
 * clearing the user-selected flag on success.  With an empty rng_list
 * the current rng is dropped and 0 is returned.  Caller holds rng_mutex.
 */
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	/* Avoid re-initializing an rng that is already current. */
	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}
333 
334 static ssize_t rng_current_store(struct device *dev,
335 				 struct device_attribute *attr,
336 				 const char *buf, size_t len)
337 {
338 	int err;
339 	struct hwrng *rng, *old_rng, *new_rng;
340 
341 	err = mutex_lock_interruptible(&rng_mutex);
342 	if (err)
343 		return -ERESTARTSYS;
344 
345 	old_rng = current_rng;
346 	if (sysfs_streq(buf, "")) {
347 		err = enable_best_rng();
348 	} else {
349 		list_for_each_entry(rng, &rng_list, list) {
350 			if (sysfs_streq(rng->name, buf)) {
351 				err = set_current_rng(rng);
352 				if (!err)
353 					cur_rng_set_by_user = 1;
354 				break;
355 			}
356 		}
357 	}
358 	new_rng = get_current_rng_nolock();
359 	mutex_unlock(&rng_mutex);
360 
361 	if (new_rng) {
362 		if (new_rng != old_rng)
363 			add_early_randomness(new_rng);
364 		put_rng(new_rng);
365 	}
366 
367 	return err ? : len;
368 }
369 
370 static ssize_t rng_current_show(struct device *dev,
371 				struct device_attribute *attr,
372 				char *buf)
373 {
374 	ssize_t ret;
375 	struct hwrng *rng;
376 
377 	rng = get_current_rng();
378 	if (IS_ERR(rng))
379 		return PTR_ERR(rng);
380 
381 	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
382 	put_rng(rng);
383 
384 	return ret;
385 }
386 
387 static ssize_t rng_available_show(struct device *dev,
388 				  struct device_attribute *attr,
389 				  char *buf)
390 {
391 	int err;
392 	struct hwrng *rng;
393 
394 	err = mutex_lock_interruptible(&rng_mutex);
395 	if (err)
396 		return -ERESTARTSYS;
397 	buf[0] = '\0';
398 	list_for_each_entry(rng, &rng_list, list) {
399 		strlcat(buf, rng->name, PAGE_SIZE);
400 		strlcat(buf, " ", PAGE_SIZE);
401 	}
402 	strlcat(buf, "\n", PAGE_SIZE);
403 	mutex_unlock(&rng_mutex);
404 
405 	return strlen(buf);
406 }
407 
/*
 * sysfs show for "rng_selected": 1 if the current rng was explicitly
 * chosen by the user via rng_current, 0 if it was auto-selected.
 */
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
414 
/*
 * sysfs show for "rng_quality": the current rng's entropy estimate per
 * 1024 bits of input; -ENODEV when no rng is selected.
 */
static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}
434 
435 static ssize_t rng_quality_store(struct device *dev,
436 				 struct device_attribute *attr,
437 				 const char *buf, size_t len)
438 {
439 	u16 quality;
440 	int ret = -EINVAL;
441 
442 	if (len < 2)
443 		return -EINVAL;
444 
445 	ret = mutex_lock_interruptible(&rng_mutex);
446 	if (ret)
447 		return -ERESTARTSYS;
448 
449 	ret = kstrtou16(buf, 0, &quality);
450 	if (ret || quality > 1024) {
451 		ret = -EINVAL;
452 		goto out;
453 	}
454 
455 	if (!current_rng) {
456 		ret = -ENODEV;
457 		goto out;
458 	}
459 
460 	current_rng->quality = quality;
461 	current_quality = quality; /* obsolete */
462 
463 	/* the best available RNG may have changed */
464 	ret = enable_best_rng();
465 
466 out:
467 	mutex_unlock(&rng_mutex);
468 	return ret ? ret : len;
469 }
470 
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

/* sysfs attributes exposed on the hwrng misc device. */
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

/* Generates rng_dev_groups[] referenced by rng_miscdev. */
ATTRIBUTE_GROUPS(rng_dev);
485 
/* Remove the /dev/hwrng character device. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
490 
/* Create the /dev/hwrng character device; returns 0 or a negative errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
495 
/*
 * Body of the "hwrng" kernel thread: repeatedly read from the current
 * rng and feed the data into the entropy pool, crediting entropy
 * according to the rng's quality.  Exits when no rng is available or
 * when stopped via kthread_stop().
 */
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		/* Pick up writes to the obsolete current_quality parameter. */
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		/* Failed read: back off; hwrng_msleep() returns early if the
		 * rng is being unregistered (its ->dying fires). */
		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	hwrng_fill = NULL;
	return 0;
}
538 
/**
 * hwrng_register - register a hardware random number generator
 * @rng: the rng to register; needs a name and at least one of the
 *	 ->read or ->data_read callbacks
 *
 * Adds @rng to the list of known rngs and, unless the user explicitly
 * pinned another rng via sysfs, makes it current when it is the first
 * rng or offers better quality than the present one.  When safe to do
 * so, the new device is also used to seed the entropy pool right away.
 *
 * Return: 0 on success, -EINVAL for a malformed @rng, -EEXIST for a
 * duplicate name, or the error from the driver's init callback.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	/* Start with cleanup_done completed so an unused rng can unregister. */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
598 
/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the rng to remove
 *
 * Takes @rng off the list, wakes any sleeper in hwrng_msleep() via the
 * ->dying completion and, if @rng was current, switches to the best
 * remaining rng (or none).  Stops the fill thread when the list becomes
 * empty.  Blocks until all references to @rng have been dropped and its
 * cleanup has run.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* no usable replacement: run with no current rng */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		/* Seed the pool from the replacement rng, if it changed. */
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	/* Wait for the last kref to drop and the driver cleanup to finish. */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
634 
/* devres release callback: unregister the rng stored in the devres slot. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
639 
/* devres match callback: does this slot hold @data (a struct hwrng *)? */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
649 
650 int devm_hwrng_register(struct device *dev, struct hwrng *rng)
651 {
652 	struct hwrng **ptr;
653 	int error;
654 
655 	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
656 	if (!ptr)
657 		return -ENOMEM;
658 
659 	error = hwrng_register(rng);
660 	if (error) {
661 		devres_free(ptr);
662 		return error;
663 	}
664 
665 	*ptr = rng;
666 	devres_add(dev, ptr);
667 	return 0;
668 }
669 EXPORT_SYMBOL_GPL(devm_hwrng_register);
670 
/* Explicitly drop the device-managed registration of @rng on @dev. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
676 
/*
 * Sleep up to @msecs on behalf of @rng.  Wakes early (returning
 * non-zero) if the rng is being unregistered (->dying completes) or a
 * signal arrives; returns 0 on plain timeout.
 */
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	/* +1 jiffy of slack on the ms-to-jiffies conversion */
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);
684 
685 static int __init hwrng_modinit(void)
686 {
687 	int ret;
688 
689 	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
690 	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
691 	if (!rng_buffer)
692 		return -ENOMEM;
693 
694 	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
695 	if (!rng_fillbuf) {
696 		kfree(rng_buffer);
697 		return -ENOMEM;
698 	}
699 
700 	ret = register_miscdev();
701 	if (ret) {
702 		kfree(rng_fillbuf);
703 		kfree(rng_buffer);
704 	}
705 
706 	return ret;
707 }
708 
/* Module teardown: free the shared buffers and remove the chrdev. */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	/* All rngs must already be unregistered by their owners. */
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
719 
720 fs_initcall(hwrng_modinit); /* depends on misc_register() */
721 module_exit(hwrng_modexit);
722 
723 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
724 MODULE_LICENSE("GPL");
725