/*
	Added support for the AMD Geode LX RNG
	(c) Copyright 2004-2005 Advanced Micro Devices, Inc.

	derived from

	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
	(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>

	derived from

	Hardware driver for the AMD 768 Random Number Generator (RNG)
	(c) Copyright 2001 Red Hat Inc <alan@redhat.com>

	derived from

	Hardware driver for Intel i810 Random Number Generator (RNG)
	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>

	Added generic RNG API
	Copyright 2006 Michael Buesch <m@bues.ch>
	Copyright 2005 (c) MontaVista Software, Inc.

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

 */


#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
#include <asm/uaccess.h>


#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */


static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
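
/*
 * Mix a small sample from a newly initialised or newly registered rng into
 * the kernel's pool.  add_device_randomness() does not credit any entropy
 * for this data; it only perturbs the pool's state.
 */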
static void add_early_randomness(struct hwrng *rng)
{
	unsigned char bytes[16];
	int bytes_read;

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(bytes, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}
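
/*
 * Character device interface: /dev/hwrng is a read-only misc device whose
 * reads are served from the currently selected rng through rng_buffer,
 * serialised by reading_mutex.  With O_NONBLOCK set, a read returns -EAGAIN
 * if no data is immediately available.
 */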
static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait) {
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		mutex_lock(&reading_mutex);
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
	put_rng(rng);
	goto out;
}


static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
};


static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (strcmp(rng->name, buf) == 0) {
			err = 0;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}
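
/*
 * "rng_available" (read-only) lists the names of all registered rngs,
 * separated by spaces; the active one can be switched by writing its name
 * to "rng_current" above.
 */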
static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);


static void __exit unregister_miscdev(void)
{
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	int err;

	err = misc_register(&rng_miscdev);
	if (err)
		goto out;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_current);
	if (err)
		goto err_misc_dereg;
	err = device_create_file(rng_miscdev.this_device,
				 &dev_attr_rng_available);
	if (err)
		goto err_remove_current;
out:
	return err;

err_remove_current:
	device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
err_misc_dereg:
	misc_deregister(&rng_miscdev);
	goto out;
}

static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}
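
/*
 * Note on the entropy credit in hwrng_fillfn() above: current_quality is
 * the estimated entropy of the source on a 0..1024 scale ("per mill" in the
 * module parameter description), so a refill of rc bytes is credited with
 * rc * 8 * current_quality >> 10 bits.  For example, with a quality of 700
 * a 32-byte read credits 32 * 8 * 700 >> 10 = 175 bits.
 */
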
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	err = -ENOMEM;
	if (!rng_buffer) {
		rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
		if (!rng_buffer)
			goto out_unlock;
	}
	if (!rng_fillbuf) {
		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
		if (!rng_fillbuf) {
			kfree(rng_buffer);
			goto out_unlock;
		}
	}

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
	return register_miscdev();
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
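
/*
 * Illustrative sketch only (not part of the original file): a minimal
 * backend is expected to fill in a struct hwrng and register it, typically
 * from its probe routine via devm_hwrng_register().  The names foo_rng_read
 * and foo_rng below are hypothetical.  The ->read callback returns the
 * number of bytes written to "data" (at most "max") or a negative errno,
 * and may only block when "wait" is true.
 */
#if 0
static int foo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* Pull bytes from the hardware here; this stub just reports an error. */
	return -EIO;
}

static struct hwrng foo_rng = {
	.name	 = "foo",
	.read	 = foo_rng_read,
	.quality = 512,	/* example estimate on the 0..1024 scale, see hwrng_init() */
};

/* From the backend's probe(), with "dev" being its struct device: */
/*	return devm_hwrng_register(dev, &foo_rng); */
#endif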