xref: /linux/drivers/char/hw_random/core.c (revision 3d780c8a9850ad60dee47a8d971ba7888f3d1bd3)
1 /*
2  * hw_random/core.c: HWRNG core API
3  *
4  * Copyright 2006 Michael Buesch <m@bues.ch>
5  * Copyright 2005 (c) MontaVista Software, Inc.
6  *
7  * Please read Documentation/admin-guide/hw_random.rst for details on use.
8  *
9  * This software may be used and distributed according to the terms
10  * of the GNU General Public License, incorporated herein by reference.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/random.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/sched/signal.h>
22 #include <linux/miscdevice.h>
23 #include <linux/module.h>
24 #include <linux/random.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/uaccess.h>
28 
#define RNG_MODULE_NAME		"hw_random"

/* The rng handed out to readers and the fill thread; protected by rng_mutex */
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
/* kthread (hwrng_fillfn) feeding the input pool; NULL when not running */
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* bytes of unread data remaining in rng_buffer (consumed from the tail) */
static int data_avail;
/* staging buffers: rng_buffer for /dev/hwrng readers, rng_fillbuf for the kthread */
static u8 *rng_buffer, *rng_fillbuf;
/* quality values are entropy per 1024 bits of input, i.e. 0..1024 */
static unsigned short current_quality;
static unsigned short default_quality = 1024; /* default to maximum */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default maximum entropy content of hwrng per 1024 bits of input");

/* Forward declarations for routines referenced before their definitions */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
59 
60 static size_t rng_buffer_size(void)
61 {
62 	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
63 }
64 
/*
 * Feed a small, non-blocking sample from @rng into the kernel's
 * device-randomness pool.  Called when an rng becomes current; a
 * failed or empty read is silently ignored.
 */
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;

	/* reading_mutex protects rng_fillbuf and the driver read path */
	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_fillbuf, bytes_read);
}
75 
/*
 * kref release callback, invoked when the last reference to an rng is
 * dropped.  Runs the driver's optional ->cleanup() hook and then wakes
 * anyone blocked in hwrng_unregister() via cleanup_done.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
85 
/*
 * Make @rng the system-wide current rng: initialize it (taking a
 * reference via hwrng_init()), release the previous current rng, and
 * start the fill kthread if it is not already running.  Caller must
 * hold rng_mutex.  Returns 0 or a negative errno from hwrng_init().
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			/* non-fatal: /dev/hwrng still works, only auto-fill is lost */
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}
110 
/*
 * Release the core's reference on current_rng and clear the pointer.
 * The rng's ->cleanup() runs via cleanup_rng() once the last reference
 * is gone.  Caller must hold rng_mutex; no-op when there is no
 * current rng.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
121 
/*
 * Take a reference on current_rng and return it; NULL when no rng is
 * current.  Caller must hold rng_mutex and must balance a non-NULL
 * result with put_rng().  (The locked wrapper get_current_rng() may
 * additionally return ERR_PTR().)
 */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}
130 
/*
 * Locked wrapper around get_current_rng_nolock().  Returns the
 * refcounted current rng, NULL when there is none, or
 * ERR_PTR(-ERESTARTSYS) if interrupted while acquiring rng_mutex.
 */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}
143 
/*
 * Drop a reference obtained via get_current_rng*().  NULL is tolerated
 * so callers can pass through whatever get_current_rng() returned.
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
155 
/*
 * Take a reference on @rng, running the driver's ->init() hook only
 * when the rng is not already live (refcount was zero).  Always clamps
 * the advertised quality to at most min(default_quality, 1024), with 0
 * treated as "maximum".  Called with rng_mutex held; returns 0 or the
 * ->init() error.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* already initialized and referenced: just take another ref */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	/* quality is entropy per 1024 bits of input; 0 means "use maximum" */
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
	current_quality = rng->quality; /* obsolete */

	return 0;
}
178 
179 static int rng_dev_open(struct inode *inode, struct file *filp)
180 {
181 	/* enforce read-only access to this chrdev */
182 	if ((filp->f_mode & FMODE_READ) == 0)
183 		return -EINVAL;
184 	if (filp->f_mode & FMODE_WRITE)
185 		return -EINVAL;
186 	return 0;
187 }
188 
/*
 * Pull up to @size bytes from @rng into @buffer.  Prefers the driver's
 * ->read() hook; otherwise falls back to the legacy ->data_present() /
 * ->data_read() pair (which transfers a single u32).  @wait is passed
 * through to the driver to select blocking behaviour.  Returns bytes
 * read, 0 if no data was available, or a negative errno from the
 * driver.  Caller must hold reading_mutex.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	/* legacy interface: poll for availability, then read one word */
	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}
207 
/*
 * read() handler for /dev/hwrng.  Repeatedly refills rng_buffer from
 * the current rng (under reading_mutex) and copies data to userspace
 * until @size bytes are delivered, an error occurs, or a signal is
 * pending.  Honours O_NONBLOCK.  Returns the number of bytes copied,
 * or a negative errno only when nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		/* re-acquire each iteration: the current rng may change */
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		/* refill the staging buffer once previous data is consumed */
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* hand out bytes from the tail of rng_buffer */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* partial reads succeed; report the error only if nothing was read */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
284 
/* file_operations for the read-only /dev/hwrng character device */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
291 
292 static const struct attribute_group *rng_dev_groups[];
293 
/* /dev/hwrng misc device; .groups attaches the sysfs attributes below */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
301 
/*
 * Select the registered rng with the highest quality as current_rng
 * and clear any explicit user selection.  With an empty rng_list the
 * current rng is simply dropped.  Caller must hold rng_mutex.
 * Returns 0 on success or a set_current_rng() error.
 */
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	/* avoid re-initializing the rng that is already current */
	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}
328 
329 static ssize_t rng_current_store(struct device *dev,
330 				 struct device_attribute *attr,
331 				 const char *buf, size_t len)
332 {
333 	int err;
334 	struct hwrng *rng, *old_rng, *new_rng;
335 
336 	err = mutex_lock_interruptible(&rng_mutex);
337 	if (err)
338 		return -ERESTARTSYS;
339 
340 	old_rng = current_rng;
341 	if (sysfs_streq(buf, "")) {
342 		err = enable_best_rng();
343 	} else {
344 		list_for_each_entry(rng, &rng_list, list) {
345 			if (sysfs_streq(rng->name, buf)) {
346 				err = set_current_rng(rng);
347 				if (!err)
348 					cur_rng_set_by_user = 1;
349 				break;
350 			}
351 		}
352 	}
353 	new_rng = get_current_rng_nolock();
354 	mutex_unlock(&rng_mutex);
355 
356 	if (new_rng) {
357 		if (new_rng != old_rng)
358 			add_early_randomness(new_rng);
359 		put_rng(new_rng);
360 	}
361 
362 	return err ? : len;
363 }
364 
365 static ssize_t rng_current_show(struct device *dev,
366 				struct device_attribute *attr,
367 				char *buf)
368 {
369 	ssize_t ret;
370 	struct hwrng *rng;
371 
372 	rng = get_current_rng();
373 	if (IS_ERR(rng))
374 		return PTR_ERR(rng);
375 
376 	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
377 	put_rng(rng);
378 
379 	return ret;
380 }
381 
/*
 * sysfs: space-separated names of all registered rngs, newline
 * terminated.  Output is truncated at PAGE_SIZE by strlcat().
 */
static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
402 
/*
 * sysfs: 1 if the current rng was explicitly chosen by the user via
 * rng_current, 0 if it was selected automatically.
 */
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
409 
/*
 * sysfs: show the current rng's quality (entropy per 1024 bits of
 * input).  Returns -ENODEV when no rng is current.
 */
static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}
429 
430 static ssize_t rng_quality_store(struct device *dev,
431 				 struct device_attribute *attr,
432 				 const char *buf, size_t len)
433 {
434 	u16 quality;
435 	int ret = -EINVAL;
436 
437 	if (len < 2)
438 		return -EINVAL;
439 
440 	ret = mutex_lock_interruptible(&rng_mutex);
441 	if (ret)
442 		return -ERESTARTSYS;
443 
444 	ret = kstrtou16(buf, 0, &quality);
445 	if (ret || quality > 1024) {
446 		ret = -EINVAL;
447 		goto out;
448 	}
449 
450 	if (!current_rng) {
451 		ret = -ENODEV;
452 		goto out;
453 	}
454 
455 	current_rng->quality = quality;
456 	current_quality = quality; /* obsolete */
457 
458 	/* the best available RNG may have changed */
459 	ret = enable_best_rng();
460 
461 out:
462 	mutex_unlock(&rng_mutex);
463 	return ret ? ret : len;
464 }
465 
/* sysfs attributes exposed on the hw_random misc device */
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL	/* sentinel */
};

/* generates rng_dev_groups, referenced by rng_miscdev.groups */
ATTRIBUTE_GROUPS(rng_dev);
480 
/* Remove the /dev/hwrng misc device on module unload. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
485 
/* Create the /dev/hwrng misc device; returns 0 or a misc_register() error. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
490 
/*
 * Kernel thread that continuously feeds the input entropy pool from
 * the current rng, crediting entropy according to the rng's quality.
 * Exits (and clears hwrng_fill) when stopped or when no usable rng
 * remains.
 */
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		/* let the obsolete current_quality module param override */
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		/* back off instead of busy-looping on a dry or failing rng;
		 * hwrng_msleep() also wakes early if the rng is dying */
		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}
533 
/**
 * hwrng_register - register a hardware random number generator
 * @rng: the rng to register; must have a name and either a ->read or
 *       a legacy ->data_read hook
 *
 * Adds @rng to the global list.  It becomes the current rng when none
 * exists yet, or when it offers better quality than the current one
 * and the user has not pinned a choice via sysfs.
 *
 * Returns 0 on success, -EINVAL for a malformed rng, -EEXIST for a
 * duplicate name, or a set_current_rng() error.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	/* start with cleanup_done completed so an unused rng can be
	 * unregistered without blocking */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
593 
/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the rng to remove
 *
 * Removes @rng from the list, wakes any sleeper in hwrng_msleep() via
 * rng->dying, switches to the best remaining rng if @rng was current
 * (stopping the fill thread when none remain), and blocks until the
 * rng's cleanup has completed.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* no usable replacement: run without a current rng */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* the fill thread has nothing left to feed from */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	/* wait for cleanup_rng() to signal that @rng is fully released */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
629 
/* devres release callback: unregister the managed rng on device teardown. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
634 
/* devres match callback: true when @res holds the rng pointed to by @data. */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **hwrng_res = res;

	if (WARN_ON(!hwrng_res || !*hwrng_res))
		return 0;

	return *hwrng_res == data;
}
644 
/**
 * devm_hwrng_register - resource-managed hwrng_register()
 * @dev: device that owns the rng's lifetime
 * @rng: the rng to register
 *
 * Like hwrng_register(), but the rng is automatically unregistered via
 * devm_hwrng_release() when @dev is unbound.  Returns 0, -ENOMEM, or a
 * hwrng_register() error.
 */
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);
665 
/* Undo devm_hwrng_register() before @dev is unbound; triggers
 * devm_hwrng_release() for the matching resource. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
671 
/*
 * Sleep for up to @msecs milliseconds, waking early when @rng is being
 * unregistered (rng->dying completed).  Returns 0 on full timeout,
 * a positive value when woken by the completion, or -ERESTARTSYS if
 * interrupted by a signal.
 */
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	/* +1 jiffy guarantees a sleep of at least @msecs */
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);
679 
/*
 * Module init: allocate the two staging buffers and register the
 * /dev/hwrng misc device, unwinding the allocations on failure.
 */
static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}
703 
/*
 * Module exit: free the staging buffers and remove /dev/hwrng.  All
 * rngs must already be unregistered, hence the BUG_ON(current_rng).
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
714 
715 fs_initcall(hwrng_modinit); /* depends on misc_register() */
716 module_exit(hwrng_modexit);
717 
718 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
719 MODULE_LICENSE("GPL");
720