/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void hwrng_manage_rngd(struct hwrng *rng);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

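/*
 * Seed the input pool with a small, uncredited sample from a freshly
 * selected generator.  The read is non-blocking, so a source with no
 * data ready simply contributes nothing.
 */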
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_fillbuf, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

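/*
 * Make @rng the current generator.  The new generator is initialized
 * before the reference on the old one is dropped, so a failed init
 * leaves the previous selection intact.
 */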
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}

static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize against a concurrent
	 * set_current_rng() that makes this rng current again right away.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

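/*
 * Bring a generator into service: take a new reference if it is already
 * live, otherwise run its init hook and reset its refcount.  Also clamps
 * the quality estimate to 1024/1024 and starts or stops the hwrng kernel
 * thread to match.
 */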
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	if (!rng->quality)
		rng->quality = default_quality;
	if (rng->quality > 1024)
		rng->quality = 1024;
	current_quality = rng->quality; /* obsolete */

	hwrng_manage_rngd(rng);

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

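/*
 * Pull bytes from a generator, preferring its read() hook and falling
 * back to the older data_present()/data_read() pair.  Callers must hold
 * reading_mutex, since all paths share rng_buffer/rng_fillbuf.
 */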
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

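/*
 * read() handler for /dev/hwrng.  Refills rng_buffer from the current
 * generator as needed and drains it to userspace, honouring O_NONBLOCK
 * and pending signals between chunks.
 */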
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

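/*
 * Select the highest-quality registered generator, clearing any
 * user-forced choice.  With an empty rng_list the current generator
 * is simply dropped.
 */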
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}

static ssize_t rng_current_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int err;
	struct hwrng *rng, *old_rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	old_rng = current_rng;
	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		/* fail, rather than report success, if no rng matches */
		err = -ENODEV;
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				err = set_current_rng(rng);
				if (!err)
					cur_rng_set_by_user = 1;
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (new_rng != old_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	return err ? : len;
}

360 
361 static ssize_t rng_current_show(struct device *dev,
362 				struct device_attribute *attr,
363 				char *buf)
364 {
365 	ssize_t ret;
366 	struct hwrng *rng;
367 
368 	rng = get_current_rng();
369 	if (IS_ERR(rng))
370 		return PTR_ERR(rng);
371 
372 	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
373 	put_rng(rng);
374 
375 	return ret;
376 }
377 
378 static ssize_t rng_available_show(struct device *dev,
379 				  struct device_attribute *attr,
380 				  char *buf)
381 {
382 	int err;
383 	struct hwrng *rng;
384 
385 	err = mutex_lock_interruptible(&rng_mutex);
386 	if (err)
387 		return -ERESTARTSYS;
388 	buf[0] = '\0';
389 	list_for_each_entry(rng, &rng_list, list) {
390 		strlcat(buf, rng->name, PAGE_SIZE);
391 		strlcat(buf, " ", PAGE_SIZE);
392 	}
393 	strlcat(buf, "\n", PAGE_SIZE);
394 	mutex_unlock(&rng_mutex);
395 
396 	return strlen(buf);
397 }
398 
399 static ssize_t rng_selected_show(struct device *dev,
400 				 struct device_attribute *attr,
401 				 char *buf)
402 {
403 	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
404 }
405 
406 static ssize_t rng_quality_show(struct device *dev,
407 				struct device_attribute *attr,
408 				char *buf)
409 {
410 	ssize_t ret;
411 	struct hwrng *rng;
412 
413 	rng = get_current_rng();
414 	if (IS_ERR(rng))
415 		return PTR_ERR(rng);
416 
417 	if (!rng) /* no need to put_rng */
418 		return -ENODEV;
419 
420 	ret = sysfs_emit(buf, "%hu\n", rng->quality);
421 	put_rng(rng);
422 
423 	return ret;
424 }
425 
426 static ssize_t rng_quality_store(struct device *dev,
427 				 struct device_attribute *attr,
428 				 const char *buf, size_t len)
429 {
430 	u16 quality;
431 	int ret = -EINVAL;
432 
433 	if (len < 2)
434 		return -EINVAL;
435 
436 	ret = mutex_lock_interruptible(&rng_mutex);
437 	if (ret)
438 		return -ERESTARTSYS;
439 
440 	ret = kstrtou16(buf, 0, &quality);
441 	if (ret || quality > 1024) {
442 		ret = -EINVAL;
443 		goto out;
444 	}
445 
446 	if (!current_rng) {
447 		ret = -ENODEV;
448 		goto out;
449 	}
450 
451 	current_rng->quality = quality;
452 	current_quality = quality; /* obsolete */
453 
454 	/* the best available RNG may have changed */
455 	ret = enable_best_rng();
456 
457 	/* start/stop rngd if necessary */
458 	if (current_rng)
459 		hwrng_manage_rngd(current_rng);
460 
461 out:
462 	mutex_unlock(&rng_mutex);
463 	return ret ? ret : len;
464 }
465 
466 static DEVICE_ATTR_RW(rng_current);
467 static DEVICE_ATTR_RO(rng_available);
468 static DEVICE_ATTR_RO(rng_selected);
469 static DEVICE_ATTR_RW(rng_quality);
470 
471 static struct attribute *rng_dev_attrs[] = {
472 	&dev_attr_rng_current.attr,
473 	&dev_attr_rng_available.attr,
474 	&dev_attr_rng_selected.attr,
475 	&dev_attr_rng_quality.attr,
476 	NULL
477 };
478 
479 ATTRIBUTE_GROUPS(rng_dev);
480 
481 static void __exit unregister_miscdev(void)
482 {
483 	misc_deregister(&rng_miscdev);
484 }
485 
486 static int __init register_miscdev(void)
487 {
488 	return misc_register(&rng_miscdev);
489 }
490 
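/*
 * Body of the hwrng kernel thread: repeatedly read the current
 * generator and feed the pool via add_hwgenerator_randomness(),
 * crediting entropy according to the quality estimate.  Backs off
 * for ten seconds after a failed read and exits when no generator
 * remains or the quality drops to zero.
 */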
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (!quality)
			break;

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
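		/* Example: at quality 3, a 32-byte read is worth
		 * 32 * 8 * 3 = 768/1024ths of a bit -- nothing is credited
		 * yet and 768 carries over, so the next such read reaches
		 * 1536/1024ths and credits one full bit.
		 */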
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

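/*
 * Start or stop the hwrng fill thread so that it runs exactly when the
 * current generator claims a non-zero entropy quality.
 */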
static void hwrng_manage_rngd(struct hwrng *rng)
{
	if (WARN_ON(!mutex_is_locked(&rng_mutex)))
		return;

	if (rng->quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (rng->quality > 0 && !hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}
}

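/**
 * hwrng_register - register a hardware random number generator
 * @rng: the generator to register; must provide a name and either
 *       a read() or a data_read() hook
 *
 * Adds @rng to rng_list and, unless userspace has pinned a generator
 * via sysfs, promotes it to current_rng when it reports a higher
 * quality than the current one.
 *
 * Returns 0 on success or a negative error code.
 */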
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback.
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

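/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the generator to remove
 *
 * Removes @rng from rng_list, switches current_rng to the best
 * remaining generator if needed, and blocks until any in-flight
 * cleanup of @rng has completed.
 */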
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

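/**
 * devm_hwrng_register - resource-managed hwrng_register()
 * @dev: device that owns the generator
 * @rng: the generator to register
 *
 * The generator is automatically unregistered when @dev is unbound.
 */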
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

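/**
 * hwrng_msleep - sleep in a driver or the fill thread, waking early
 *                on unregister
 * @rng: the generator being read
 * @msecs: how long to sleep
 *
 * Waits on @rng->dying, which hwrng_unregister() completes, so the
 * sleep is cut short as soon as @rng goes away.
 */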
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);

static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");