/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case 0:
		/* SEEK_SET */
		break;
	case 1:
		/* SEEK_CUR */
		offset += file->f_pos;
		break;
	case 2:
		/* SEEK_END */
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}



static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		return -ENOMEM;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000
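
/*
 * Overview of the read/write paths below: user data is bounced through
 * a kmalloc()ed kernel buffer of at most MAX_KMALLOC_SIZE bytes and the
 * request is serviced in chunks, advancing *ppos by the amount actually
 * transferred.  NAND ECC failures (-EBADMSG) are not propagated to the
 * caller; the data that was read is returned anyway so userspace tools
 * can still dump damaged areas (see the comment in mtd_read()).
 *
 * A minimal userspace sketch of a chunked read (the device node name is
 * only an example, not something this driver guarantees):
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	char buf[2048];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0);
 */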

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
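
/*
 * Write-path notes (a summary of the behaviour implemented below, not a
 * general MTD guarantee): a write that starts exactly at the end of the
 * device fails with -ENOSPC, one that merely runs past the end is
 * truncated to the device size, and writes issued while the file is in
 * MTD_MODE_OTP_FACTORY always fail with -EROFS because the factory OTP
 * area is read-only.  User data is copied in MAX_KMALLOC_SIZE chunks,
 * so a single write() may translate into several driver-level writes.
 */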

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		info.ecctype	= mtd->ecctype;
		info.eccsize	= mtd->eccsize;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

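	/*
	 * The MEMERASE handler below requires the device to be open for
	 * writing and blocks uninterruptibly until the driver finishes,
	 * returning -EIO if the erase failed.  A rough userspace sketch
	 * (the erasesize value would normally come from MEMGETINFO and is
	 * only an assumption here; alignment is enforced by the lower
	 * driver, not by this handler):
	 *
	 *	struct erase_info_user ei = { .start = 0, .length = erasesize };
	 *	if (ioctl(fd, MEMERASE, &ei) < 0)
	 *		perror("MEMERASE");
	 */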
	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;

	}
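
	/*
	 * The OOB ioctls operate on struct mtd_oob_buf { start, length,
	 * ptr }.  Transfers are capped at 4096 bytes, the low bits of
	 * 'start' (modulo mtd->oobsize) give the offset inside the OOB
	 * area, and the count actually transferred is copied back to
	 * userspace (into the 'length' word for MEMWRITEOOB, into the
	 * leading 'start' word for MEMREADOOB, as written here).  A rough
	 * userspace sketch for reading one page's OOB area (the 64-byte
	 * size and page_ofs variable are only examples):
	 *
	 *	unsigned char oob[64];
	 *	struct mtd_oob_buf ob = { .start = page_ofs,
	 *				  .length = sizeof(oob), .ptr = oob };
	 *	if (ioctl(fd, MEMREADOOB, &ob) < 0)
	 *		perror("MEMREADOOB");
	 */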
	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen))
			ret = -EFAULT;

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

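	/*
	 * OTP ioctls (compiled only with CONFIG_MTD_OTP or
	 * CONFIG_MTD_ONENAND_OTP): OTPSELECT switches this open file
	 * between the normal area and the factory/user one-time-
	 * programmable regions, so later read()/write() calls are routed
	 * to the protection registers; OTPGETREGION{COUNT,INFO} report the
	 * regions as struct otp_info entries; OTPLOCK locks a user OTP
	 * region and is accepted only in MTD_OTP_USER mode.  A rough
	 * userspace sketch (error handling omitted):
	 *
	 *	int mode = MTD_OTP_USER;
	 *	ioctl(fd, OTPSELECT, &mode);
	 *	pread(fd, buf, len, 0);		// reads the user OTP area
	 */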
#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");