/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case 0:
		/* SEEK_SET */
		break;
	case 1:
		/* SEEK_CUR */
		offset += file->f_pos;
		break;
	case 2:
		/* SEEK_END */
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}



static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		info.ecctype = mtd->ecctype;
		info.eccsize = mtd->eccsize;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ?
					0 : -EFAULT;
		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret =
		      mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		/* ecclayout is a pointer; copy the layout it points to,
		   not the pointer itself */
		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
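
/*
 * Usage sketch (illustrative only, not part of the driver): a userspace
 * program talks to this interface by opening one of the /dev/mtdX nodes
 * registered above and issuing the ioctls handled in mtd_ioctl(). The
 * device path and the particular ioctls shown are assumptions made for
 * the example, kept inside this comment so the module source still builds.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int main(void)
 *	{
 *		struct mtd_info_user info;
 *		struct erase_info_user ei;
 *		int fd = open("/dev/mtd0", O_RDWR);	(hypothetical node)
 *
 *		if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
 *			return 1;
 *		printf("size %u, erasesize %u\n", info.size, info.erasesize);
 *
 *		ei.start = 0;				(first eraseblock)
 *		ei.length = info.erasesize;
 *		if (ioctl(fd, MEMERASE, &ei) < 0)
 *			return 1;
 *
 *		close(fd);
 *		return 0;
 *	}
 */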