/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}


static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
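/*
 * Illustrative sketch, not part of this driver: from userspace the
 * chunked bounce-buffer path below is invisible; an ordinary read(2)
 * on one of the /dev/mtd<N> nodes created above is all that is needed,
 * e.g.
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	unsigned char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * Internally mtd_read()/mtd_write() never kmalloc more than
 * MAX_KMALLOC_SIZE (128 KiB) at a time, so larger requests are simply
 * split across several copies through the bounce buffer.
 */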
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
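/*
 * Note on the error handling above (illustrative sketch, not part of
 * the driver's contract): an uncorrectable ECC error (-EBADMSG) or a
 * corrected bitflip (-EUCLEAN) does not abort the copy, so a userspace
 * reader sees an ordinary successful read of the affected range:
 *
 *	ssize_t n = read(fd, buf, len);
 *	...n covers the requested range even if the device reported
 *	   ECC trouble for part of it...
 *
 * The "userspace tools" mentioned in mtd_read() depend on getting the
 * data back regardless; integrity checking is left to higher layers.
 */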
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype = -1;
		info.eccsize = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
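			/*
			 * Illustrative sketch (not part of this driver): a
			 * userspace caller drives this synchronous erase path
			 * with something like
			 *
			 *	struct erase_info_user ei = {
			 *		.start  = 0,
			 *		.length = meminfo.erasesize,
			 *	};
			 *	ioctl(fd, MEMERASE, &ei);
			 *
			 * where fd and meminfo (from MEMGETINFO) are the
			 * caller's own. The ioctl only returns once the
			 * callback above has woken us, which is what makes
			 * the on-stack wait queue safe for the
			 * UNINTERRUPTIBLE sleep below.
			 */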
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
				 sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}
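	/*
	 * Illustrative sketch (not part of this driver): both OOB ioctls
	 * take a struct mtd_oob_buf describing the user buffer, e.g.
	 *
	 *	unsigned char oob[64];
	 *	struct mtd_oob_buf ob = {
	 *		.start  = offs,
	 *		.length = meminfo.oobsize,
	 *		.ptr    = oob,
	 *	};
	 *	ioctl(fd, MEMREADOOB, &ob);
	 *
	 * offs, fd and meminfo are the caller's own values. As the code
	 * around this comment shows, the offset within the OOB area is
	 * taken from 'start' modulo oobsize and 'length' is capped at
	 * 4096 bytes per call.
	 */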
	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif
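	/*
	 * Illustrative sketch (not part of this driver): the OTP ioctls
	 * above are mode-based; userspace first switches the open file
	 * into an OTP mode and then uses ordinary read()/write(), e.g.
	 *
	 *	int mode = MTD_OTP_USER;
	 *	ioctl(fd, OTPSELECT, &mode);
	 *	read(fd, buf, len);
	 *
	 * after which the read is served by read_user_prot_reg() in
	 * mtd_read(). OTPSELECT resets f_pos, so access starts at
	 * offset 0 of the selected OTP space.
	 */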
	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");