/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}



static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
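/*
 * Until then, reads and writes bounce user data through a kernel buffer of
 * at most MAX_KMALLOC_SIZE bytes; larger requests are handled chunk by
 * chunk in the loops below.
 */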
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

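		/*
		 * Raw mode: hand the data to the driver through the OOB
		 * interface with MTD_OOB_RAW, so it is transferred without
		 * ECC processing.
		 */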
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		info.ecctype = mtd->ecctype;
		info.eccsize = mtd->eccsize;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
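			/*
			 * Kick off the erase and sleep until the driver's
			 * callback (mtdchar_erase_callback) reports that it
			 * has completed or failed.
			 */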
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
				 sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}

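	/*
	 * MEMREADOOB mirrors MEMWRITEOOB: the low bits of buf.start (below
	 * mtd->oobsize) give the offset within the OOB area, the remaining
	 * bits select the page, and transfers longer than 4096 bytes are
	 * rejected.
	 */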
	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.oobretlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
						       ops.oobretlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
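/*
 * Minimal userspace sketch of this interface (not part of the driver).  It
 * assumes the device node registered above is reachable as /dev/mtd0 and
 * that the exported <mtd/mtd-user.h> header is available; error handling is
 * reduced to bare exits.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int main(void)
 *	{
 *		struct mtd_info_user info;
 *		struct erase_info_user ei;
 *		char data[] = "hello";
 *		int fd = open("/dev/mtd0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, MEMGETINFO, &info) < 0)
 *			return 1;
 *		ei.start = 0;
 *		ei.length = info.erasesize;
 *		if (ioctl(fd, MEMERASE, &ei) < 0)
 *			return 1;
 *		if (pwrite(fd, data, sizeof(data), 0) != sizeof(data))
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */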