/*
 * $Id: mtdchar.c,v 1.73 2005/07/04 17:36:41 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#include <linux/device.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * We use file->private_data to store a pointer to the MTD device.
 * Since alignment is at least 32 bits, we have 2 bits free for OTP
 * modes as well.
 */

#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)

#define MTD_MODE_OTP_FACT	1
#define MTD_MODE_OTP_USER	2
#define MTD_MODE(file) ((long)((file)->private_data) & 3)

#define SET_MTD_MODE(file, mode) \
	do { long __p = (long)((file)->private_data); \
	     (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = TO_MTD(file);

	switch (orig) {
	case 0:
		/* SEEK_SET */
		file->f_pos = offset;
		break;
	case 1:
		/* SEEK_CUR */
		file->f_pos += offset;
		break;
	case 2:
		/* SEEK_END */
		file->f_pos = mtd->size + offset;
		break;
	default:
		return -EINVAL;
	}

	if (file->f_pos < 0)
		file->f_pos = 0;
	else if (file->f_pos >= mtd->size)
		file->f_pos = mtd->size - 1;

	return file->f_pos;
}

static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = TO_MTD(file);

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * Userspace software which accesses NAND this way
		 * must be aware that it deals with NAND.
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_write */

/*======================================================================

	IOCTL calls for getting device parameters.
======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = TO_MTD(file);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				 sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ?
				0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		SET_MTD_MODE(file, 0);
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
			break;
		case MTD_OTP_USER:
			if (!mtd->read_user_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_USER);
			break;
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd,
							      buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (MTD_MODE(file) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");