xref: /linux/drivers/char/hw_random/core.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * hw_random/core.c: HWRNG core API
3  *
4  * Copyright 2006 Michael Buesch <m@bues.ch>
5  * Copyright 2005 (c) MontaVista Software, Inc.
6  *
7  * Please read Documentation/hw_random.txt for details on use.
8  *
9  * This software may be used and distributed according to the terms
10  * of the GNU General Public License, incorporated herein by reference.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/sched/signal.h>
21 #include <linux/miscdevice.h>
22 #include <linux/module.h>
23 #include <linux/random.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 
28 #define RNG_MODULE_NAME		"hw_random"
29 
/* The RNG currently backing /dev/hwrng and the kernel entropy feed (may be NULL). */
static struct hwrng *current_rng;
/* khwrngd task that feeds hardware randomness into the input pool; NULL when not running. */
static struct task_struct *hwrng_fill;
/* All registered RNGs, in registration order. */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* Bytes still unconsumed in rng_buffer from the last hardware read. */
static int data_avail;
/* rng_buffer serves /dev/hwrng reads; rng_fillbuf is khwrngd's scratch buffer. */
static u8 *rng_buffer, *rng_fillbuf;
/* Entropy estimate of the active RNG, in bits per 1024 bits of data. */
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
55 
56 static size_t rng_buffer_size(void)
57 {
58 	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
59 }
60 
61 static void add_early_randomness(struct hwrng *rng)
62 {
63 	int bytes_read;
64 	size_t size = min_t(size_t, 16, rng_buffer_size());
65 
66 	mutex_lock(&reading_mutex);
67 	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
68 	mutex_unlock(&reading_mutex);
69 	if (bytes_read > 0)
70 		add_device_randomness(rng_buffer, bytes_read);
71 }
72 
/*
 * kref release callback: run the driver's optional cleanup hook, then
 * signal cleanup_done so hwrng_unregister() can stop waiting.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
82 
/*
 * Make @rng the active RNG.  Caller must hold rng_mutex.  Initializes
 * (or re-references) @rng first; only on success is the previous
 * current_rng dropped, so a failed init leaves the old RNG in place.
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}
98 
/*
 * Release the core's reference on current_rng and clear it.  Caller must
 * hold rng_mutex.  No-op when no RNG is active.
 */
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
109 
/*
 * Take a reference on the active RNG under rng_mutex.
 *
 * Returns ERR_PTR(-ERESTARTSYS) if interrupted while waiting for the
 * lock, NULL if no RNG is active, or a refcounted hwrng that the caller
 * must release with put_rng().
 */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}
125 
/*
 * Drop a reference obtained via get_current_rng().  Accepts NULL.
 * Dropping the last reference triggers cleanup_rng().
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
137 
/*
 * Initialize @rng for use as the current RNG (called with rng_mutex held
 * via set_current_rng()/hwrng_register()).
 *
 * If the RNG is still referenced (refcount > 0) the driver init is
 * skipped and only a new reference is taken; otherwise the driver's
 * optional ->init() runs and the refcount is reset to 1.  Either way a
 * small early-randomness sample is harvested, current_quality is
 * refreshed (clamped to 1024 per mill), and khwrngd is started or
 * stopped to match the new quality.
 *
 * Returns 0 on success or the error from ->init().
 */
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	/* Driver-provided quality wins; 0 falls back to the module default. */
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
168 
169 static int rng_dev_open(struct inode *inode, struct file *filp)
170 {
171 	/* enforce read-only access to this chrdev */
172 	if ((filp->f_mode & FMODE_READ) == 0)
173 		return -EINVAL;
174 	if (filp->f_mode & FMODE_WRITE)
175 		return -EINVAL;
176 	return 0;
177 }
178 
/*
 * Read up to @size bytes from @rng into @buffer; @wait selects blocking
 * behavior.  Caller must hold reading_mutex.
 *
 * Drivers with a modern ->read() get it called directly; otherwise the
 * legacy ->data_present()/->data_read() pair is used (data_read fills a
 * u32, so at most 4 bytes per call on that path).  Returns the number of
 * bytes read, 0 if no data was available, or a negative errno.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;	/* no hook: assume data is always available */

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}
197 
/*
 * read() handler for /dev/hwrng.  Loops refilling rng_buffer from the
 * current RNG and copying it out to userspace until @size is satisfied,
 * an error occurs, or a signal arrives.  Leftover bytes persist in
 * data_avail/rng_buffer across calls (protected by reading_mutex).
 * Honors O_NONBLOCK.  Returns bytes copied, or a negative errno if
 * nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		/* Re-acquire each iteration: the current RNG can change under us. */
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			/* Consume from the tail of the buffer (offset data_avail). */
			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* Report partial progress if any bytes were copied, else the error. */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
274 
/* File operations for the read-only /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

/* Defined below (ATTRIBUTE_GROUPS); forward-declared for rng_miscdev. */
static const struct attribute_group *rng_dev_groups[];

/* Misc device providing /dev/hwrng plus the rng_current/rng_available sysfs attrs. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
291 
/*
 * sysfs store for "rng_current": switch the active RNG to the registered
 * device whose name matches @buf.  Returns @len on success, -ENODEV if no
 * such RNG is registered, or the error from set_current_rng().
 */
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			/* Selecting the already-active RNG is a successful no-op. */
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}
315 
/*
 * sysfs show for "rng_current": print the active RNG's name, or "none"
 * when no RNG is selected.
 */
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}
332 
/*
 * sysfs show for "rng_available": space-separated list of all registered
 * RNG names, newline-terminated.  Truncated at PAGE_SIZE by strlcat.
 */
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
353 
/* rng_current is root-writable (to select an RNG); rng_available is read-only. */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

/* Generates rng_dev_groups[] used by rng_miscdev above. */
ATTRIBUTE_GROUPS(rng_dev);
368 
/* Tear down /dev/hwrng on module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
373 
/* Create /dev/hwrng; returns 0 or a negative errno from misc_register(). */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
378 
/*
 * khwrngd thread body: repeatedly read from the current RNG into
 * rng_fillbuf and credit the result to the input entropy pool, scaled by
 * current_quality (per-mill).  Backs off 10s when no data is available;
 * exits when no RNG remains or kthread_stop() is called.
 */
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		/* Entropy credit: rc bytes * quality/1024 bits per bit = rc*8*quality>>10 bits. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;	/* mark thread as gone so it can be restarted */
	return 0;
}
406 
/*
 * Launch the khwrngd fill thread.  On failure, logs and leaves
 * hwrng_fill NULL so a later quality change can retry.
 */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}
415 
/*
 * Register a hardware RNG with the core.
 *
 * @rng must have a name and at least one read hook (->read or the legacy
 * ->data_read).  The first registered RNG becomes current automatically.
 * Returns 0, -EINVAL for a malformed hwrng, -EEXIST on a duplicate name,
 * or the error from set_current_rng().
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* Start "completed" so an unregister before any init doesn't block. */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
461 
/*
 * Unregister @rng.  If it was current, fall back to the most recently
 * registered remaining RNG.  Stops khwrngd when the list empties, and
 * waits for any in-flight cleanup of @rng to finish before returning.
 */
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		/* Drop the lock first: hwrng_fillfn takes rng_mutex via get_current_rng(). */
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	/* Don't return while cleanup_rng() may still be touching @rng. */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
488 
/* devres destructor: unregister the hwrng stored in the devres node. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
493 
/*
 * devres match callback: true when the devres node holds the hwrng
 * pointer passed as @data.  A corrupt/empty node warns and never matches.
 */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **rngp = res;

	if (WARN_ON(!rngp || !*rngp))
		return 0;

	return *rngp == data ? 1 : 0;
}
503 
/*
 * Managed hwrng_register(): the RNG is automatically unregistered when
 * @dev is unbound.  Returns 0, -ENOMEM, or the hwrng_register() error.
 */
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);
524 
/*
 * Undo a devm_hwrng_register() early: finds the matching devres node and
 * runs devm_hwrng_release() (which unregisters @rng).
 */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
530 
531 static int __init hwrng_modinit(void)
532 {
533 	int ret = -ENOMEM;
534 
535 	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
536 	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
537 	if (!rng_buffer)
538 		return -ENOMEM;
539 
540 	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
541 	if (!rng_fillbuf) {
542 		kfree(rng_buffer);
543 		return -ENOMEM;
544 	}
545 
546 	ret = register_miscdev();
547 	if (ret) {
548 		kfree(rng_fillbuf);
549 		kfree(rng_buffer);
550 	}
551 
552 	return ret;
553 }
554 
/*
 * Module exit: all RNGs must already be unregistered (BUG otherwise);
 * free the shared buffers and remove /dev/hwrng.
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
565 
/* Module entry/exit points and metadata. */
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
571