1 /* 2 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 * 18 */ 19 20 #include <linux/device.h> 21 #include <linux/fs.h> 22 #include <linux/mm.h> 23 #include <linux/err.h> 24 #include <linux/init.h> 25 #include <linux/kernel.h> 26 #include <linux/module.h> 27 #include <linux/slab.h> 28 #include <linux/sched.h> 29 #include <linux/mutex.h> 30 #include <linux/backing-dev.h> 31 #include <linux/compat.h> 32 #include <linux/mount.h> 33 #include <linux/blkpg.h> 34 #include <linux/mtd/mtd.h> 35 #include <linux/mtd/partitions.h> 36 #include <linux/mtd/map.h> 37 38 #include <asm/uaccess.h> 39 40 #define MTD_INODE_FS_MAGIC 0x11307854 41 static DEFINE_MUTEX(mtd_mutex); 42 static struct vfsmount *mtd_inode_mnt __read_mostly; 43 44 /* 45 * Data structure to hold the pointer to the mtd device as well 46 * as mode information of various use cases. 
47 */ 48 struct mtd_file_info { 49 struct mtd_info *mtd; 50 struct inode *ino; 51 enum mtd_file_modes mode; 52 }; 53 54 static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig) 55 { 56 struct mtd_file_info *mfi = file->private_data; 57 struct mtd_info *mtd = mfi->mtd; 58 59 switch (orig) { 60 case SEEK_SET: 61 break; 62 case SEEK_CUR: 63 offset += file->f_pos; 64 break; 65 case SEEK_END: 66 offset += mtd->size; 67 break; 68 default: 69 return -EINVAL; 70 } 71 72 if (offset >= 0 && offset <= mtd->size) 73 return file->f_pos = offset; 74 75 return -EINVAL; 76 } 77 78 79 80 static int mtdchar_open(struct inode *inode, struct file *file) 81 { 82 int minor = iminor(inode); 83 int devnum = minor >> 1; 84 int ret = 0; 85 struct mtd_info *mtd; 86 struct mtd_file_info *mfi; 87 struct inode *mtd_ino; 88 89 pr_debug("MTD_open\n"); 90 91 /* You can't open the RO devices RW */ 92 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 93 return -EACCES; 94 95 mutex_lock(&mtd_mutex); 96 mtd = get_mtd_device(NULL, devnum); 97 98 if (IS_ERR(mtd)) { 99 ret = PTR_ERR(mtd); 100 goto out; 101 } 102 103 if (mtd->type == MTD_ABSENT) { 104 put_mtd_device(mtd); 105 ret = -ENODEV; 106 goto out; 107 } 108 109 mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); 110 if (!mtd_ino) { 111 put_mtd_device(mtd); 112 ret = -ENOMEM; 113 goto out; 114 } 115 if (mtd_ino->i_state & I_NEW) { 116 mtd_ino->i_private = mtd; 117 mtd_ino->i_mode = S_IFCHR; 118 mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; 119 unlock_new_inode(mtd_ino); 120 } 121 file->f_mapping = mtd_ino->i_mapping; 122 123 /* You can't open it RW if it's not a writeable device */ 124 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { 125 iput(mtd_ino); 126 put_mtd_device(mtd); 127 ret = -EACCES; 128 goto out; 129 } 130 131 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 132 if (!mfi) { 133 iput(mtd_ino); 134 put_mtd_device(mtd); 135 ret = -ENOMEM; 136 goto out; 137 } 138 mfi->ino = mtd_ino; 139 mfi->mtd = 
mtd; 140 file->private_data = mfi; 141 142 out: 143 mutex_unlock(&mtd_mutex); 144 return ret; 145 } /* mtdchar_open */ 146 147 /*====================================================================*/ 148 149 static int mtdchar_close(struct inode *inode, struct file *file) 150 { 151 struct mtd_file_info *mfi = file->private_data; 152 struct mtd_info *mtd = mfi->mtd; 153 154 pr_debug("MTD_close\n"); 155 156 /* Only sync if opened RW */ 157 if ((file->f_mode & FMODE_WRITE) && mtd->sync) 158 mtd->sync(mtd); 159 160 iput(mfi->ino); 161 162 put_mtd_device(mtd); 163 file->private_data = NULL; 164 kfree(mfi); 165 166 return 0; 167 } /* mtdchar_close */ 168 169 /* Back in June 2001, dwmw2 wrote: 170 * 171 * FIXME: This _really_ needs to die. In 2.5, we should lock the 172 * userspace buffer down and use it directly with readv/writev. 173 * 174 * The implementation below, using mtd_kmalloc_up_to, mitigates 175 * allocation failures when the system is under low-memory situations 176 * or if memory is highly fragmented at the cost of reducing the 177 * performance of the requested transfer due to a smaller buffer size. 178 * 179 * A more complex but more memory-efficient implementation based on 180 * get_user_pages and iovecs to cover extents of those pages is a 181 * longer-term goal, as intimated by dwmw2 above. However, for the 182 * write case, this requires yet more complex head and tail transfer 183 * handling when those head and tail offsets and sizes are such that 184 * alignment requirements are not met in the NAND subdriver. 
185 */ 186 187 static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, 188 loff_t *ppos) 189 { 190 struct mtd_file_info *mfi = file->private_data; 191 struct mtd_info *mtd = mfi->mtd; 192 size_t retlen=0; 193 size_t total_retlen=0; 194 int ret=0; 195 int len; 196 size_t size = count; 197 char *kbuf; 198 199 pr_debug("MTD_read\n"); 200 201 if (*ppos + count > mtd->size) 202 count = mtd->size - *ppos; 203 204 if (!count) 205 return 0; 206 207 kbuf = mtd_kmalloc_up_to(mtd, &size); 208 if (!kbuf) 209 return -ENOMEM; 210 211 while (count) { 212 len = min_t(size_t, count, size); 213 214 switch (mfi->mode) { 215 case MTD_FILE_MODE_OTP_FACTORY: 216 ret = mtd_read_fact_prot_reg(mtd, *ppos, len, 217 &retlen, kbuf); 218 break; 219 case MTD_FILE_MODE_OTP_USER: 220 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 221 break; 222 case MTD_FILE_MODE_RAW: 223 { 224 struct mtd_oob_ops ops; 225 226 ops.mode = MTD_OPS_RAW; 227 ops.datbuf = kbuf; 228 ops.oobbuf = NULL; 229 ops.len = len; 230 231 ret = mtd_read_oob(mtd, *ppos, &ops); 232 retlen = ops.retlen; 233 break; 234 } 235 default: 236 ret = mtd_read(mtd, *ppos, len, &retlen, kbuf); 237 } 238 /* Nand returns -EBADMSG on ECC errors, but it returns 239 * the data. For our userspace tools it is important 240 * to dump areas with ECC errors! 241 * For kernel internal usage it also might return -EUCLEAN 242 * to signal the caller that a bitflip has occurred and has 243 * been corrected by the ECC algorithm. 
244 * Userspace software which accesses NAND this way 245 * must be aware of the fact that it deals with NAND 246 */ 247 if (!ret || mtd_is_bitflip_or_eccerr(ret)) { 248 *ppos += retlen; 249 if (copy_to_user(buf, kbuf, retlen)) { 250 kfree(kbuf); 251 return -EFAULT; 252 } 253 else 254 total_retlen += retlen; 255 256 count -= retlen; 257 buf += retlen; 258 if (retlen == 0) 259 count = 0; 260 } 261 else { 262 kfree(kbuf); 263 return ret; 264 } 265 266 } 267 268 kfree(kbuf); 269 return total_retlen; 270 } /* mtdchar_read */ 271 272 static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count, 273 loff_t *ppos) 274 { 275 struct mtd_file_info *mfi = file->private_data; 276 struct mtd_info *mtd = mfi->mtd; 277 size_t size = count; 278 char *kbuf; 279 size_t retlen; 280 size_t total_retlen=0; 281 int ret=0; 282 int len; 283 284 pr_debug("MTD_write\n"); 285 286 if (*ppos == mtd->size) 287 return -ENOSPC; 288 289 if (*ppos + count > mtd->size) 290 count = mtd->size - *ppos; 291 292 if (!count) 293 return 0; 294 295 kbuf = mtd_kmalloc_up_to(mtd, &size); 296 if (!kbuf) 297 return -ENOMEM; 298 299 while (count) { 300 len = min_t(size_t, count, size); 301 302 if (copy_from_user(kbuf, buf, len)) { 303 kfree(kbuf); 304 return -EFAULT; 305 } 306 307 switch (mfi->mode) { 308 case MTD_FILE_MODE_OTP_FACTORY: 309 ret = -EROFS; 310 break; 311 case MTD_FILE_MODE_OTP_USER: 312 if (!mtd->write_user_prot_reg) { 313 ret = -EOPNOTSUPP; 314 break; 315 } 316 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 317 break; 318 319 case MTD_FILE_MODE_RAW: 320 { 321 struct mtd_oob_ops ops; 322 323 ops.mode = MTD_OPS_RAW; 324 ops.datbuf = kbuf; 325 ops.oobbuf = NULL; 326 ops.ooboffs = 0; 327 ops.len = len; 328 329 ret = mtd_write_oob(mtd, *ppos, &ops); 330 retlen = ops.retlen; 331 break; 332 } 333 334 default: 335 ret = mtd_write(mtd, *ppos, len, &retlen, kbuf); 336 } 337 if (!ret) { 338 *ppos += retlen; 339 total_retlen += retlen; 340 count -= retlen; 341 buf += 
retlen; 342 } 343 else { 344 kfree(kbuf); 345 return ret; 346 } 347 } 348 349 kfree(kbuf); 350 return total_retlen; 351 } /* mtdchar_write */ 352 353 /*====================================================================== 354 355 IOCTL calls for getting device parameters. 356 357 ======================================================================*/ 358 static void mtdchar_erase_callback (struct erase_info *instr) 359 { 360 wake_up((wait_queue_head_t *)instr->priv); 361 } 362 363 #ifdef CONFIG_HAVE_MTD_OTP 364 static int otp_select_filemode(struct mtd_file_info *mfi, int mode) 365 { 366 struct mtd_info *mtd = mfi->mtd; 367 int ret = 0; 368 369 switch (mode) { 370 case MTD_OTP_FACTORY: 371 if (!mtd->read_fact_prot_reg) 372 ret = -EOPNOTSUPP; 373 else 374 mfi->mode = MTD_FILE_MODE_OTP_FACTORY; 375 break; 376 case MTD_OTP_USER: 377 if (!mtd->read_fact_prot_reg) 378 ret = -EOPNOTSUPP; 379 else 380 mfi->mode = MTD_FILE_MODE_OTP_USER; 381 break; 382 default: 383 ret = -EINVAL; 384 case MTD_OTP_OFF: 385 break; 386 } 387 return ret; 388 } 389 #else 390 # define otp_select_filemode(f,m) -EOPNOTSUPP 391 #endif 392 393 static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, 394 uint64_t start, uint32_t length, void __user *ptr, 395 uint32_t __user *retp) 396 { 397 struct mtd_file_info *mfi = file->private_data; 398 struct mtd_oob_ops ops; 399 uint32_t retlen; 400 int ret = 0; 401 402 if (!(file->f_mode & FMODE_WRITE)) 403 return -EPERM; 404 405 if (length > 4096) 406 return -EINVAL; 407 408 if (!mtd->write_oob) 409 ret = -EOPNOTSUPP; 410 else 411 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; 412 413 if (ret) 414 return ret; 415 416 ops.ooblen = length; 417 ops.ooboffs = start & (mtd->writesize - 1); 418 ops.datbuf = NULL; 419 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? 
MTD_OPS_RAW : 420 MTD_OPS_PLACE_OOB; 421 422 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 423 return -EINVAL; 424 425 ops.oobbuf = memdup_user(ptr, length); 426 if (IS_ERR(ops.oobbuf)) 427 return PTR_ERR(ops.oobbuf); 428 429 start &= ~((uint64_t)mtd->writesize - 1); 430 ret = mtd_write_oob(mtd, start, &ops); 431 432 if (ops.oobretlen > 0xFFFFFFFFU) 433 ret = -EOVERFLOW; 434 retlen = ops.oobretlen; 435 if (copy_to_user(retp, &retlen, sizeof(length))) 436 ret = -EFAULT; 437 438 kfree(ops.oobbuf); 439 return ret; 440 } 441 442 static int mtdchar_readoob(struct file *file, struct mtd_info *mtd, 443 uint64_t start, uint32_t length, void __user *ptr, 444 uint32_t __user *retp) 445 { 446 struct mtd_file_info *mfi = file->private_data; 447 struct mtd_oob_ops ops; 448 int ret = 0; 449 450 if (length > 4096) 451 return -EINVAL; 452 453 if (!mtd->read_oob) 454 ret = -EOPNOTSUPP; 455 else 456 ret = access_ok(VERIFY_WRITE, ptr, 457 length) ? 0 : -EFAULT; 458 if (ret) 459 return ret; 460 461 ops.ooblen = length; 462 ops.ooboffs = start & (mtd->writesize - 1); 463 ops.datbuf = NULL; 464 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : 465 MTD_OPS_PLACE_OOB; 466 467 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) 468 return -EINVAL; 469 470 ops.oobbuf = kmalloc(length, GFP_KERNEL); 471 if (!ops.oobbuf) 472 return -ENOMEM; 473 474 start &= ~((uint64_t)mtd->writesize - 1); 475 ret = mtd_read_oob(mtd, start, &ops); 476 477 if (put_user(ops.oobretlen, retp)) 478 ret = -EFAULT; 479 else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf, 480 ops.oobretlen)) 481 ret = -EFAULT; 482 483 kfree(ops.oobbuf); 484 485 /* 486 * NAND returns -EBADMSG on ECC errors, but it returns the OOB 487 * data. For our userspace tools it is important to dump areas 488 * with ECC errors! 489 * For kernel internal usage it also might return -EUCLEAN 490 * to signal the caller that a bitflip has occured and has 491 * been corrected by the ECC algorithm. 
492 * 493 * Note: currently the standard NAND function, nand_read_oob_std, 494 * does not calculate ECC for the OOB area, so do not rely on 495 * this behavior unless you have replaced it with your own. 496 */ 497 if (mtd_is_bitflip_or_eccerr(ret)) 498 return 0; 499 500 return ret; 501 } 502 503 /* 504 * Copies (and truncates, if necessary) data from the larger struct, 505 * nand_ecclayout, to the smaller, deprecated layout struct, 506 * nand_ecclayout_user. This is necessary only to support the deprecated 507 * API ioctl ECCGETLAYOUT while allowing all new functionality to use 508 * nand_ecclayout flexibly (i.e. the struct may change size in new 509 * releases without requiring major rewrites). 510 */ 511 static int shrink_ecclayout(const struct nand_ecclayout *from, 512 struct nand_ecclayout_user *to) 513 { 514 int i; 515 516 if (!from || !to) 517 return -EINVAL; 518 519 memset(to, 0, sizeof(*to)); 520 521 to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES); 522 for (i = 0; i < to->eccbytes; i++) 523 to->eccpos[i] = from->eccpos[i]; 524 525 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) { 526 if (from->oobfree[i].length == 0 && 527 from->oobfree[i].offset == 0) 528 break; 529 to->oobavail += from->oobfree[i].length; 530 to->oobfree[i] = from->oobfree[i]; 531 } 532 533 return 0; 534 } 535 536 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, 537 struct blkpg_ioctl_arg __user *arg) 538 { 539 struct blkpg_ioctl_arg a; 540 struct blkpg_partition p; 541 542 if (!capable(CAP_SYS_ADMIN)) 543 return -EPERM; 544 545 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) 546 return -EFAULT; 547 548 if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) 549 return -EFAULT; 550 551 switch (a.op) { 552 case BLKPG_ADD_PARTITION: 553 554 /* Only master mtd device must be used to add partitions */ 555 if (mtd_is_partition(mtd)) 556 return -EINVAL; 557 558 return mtd_add_partition(mtd, p.devname, p.start, p.length); 559 560 case BLKPG_DEL_PARTITION: 
561 562 if (p.pno < 0) 563 return -EINVAL; 564 565 return mtd_del_partition(mtd, p.pno); 566 567 default: 568 return -EINVAL; 569 } 570 } 571 572 static int mtdchar_write_ioctl(struct mtd_info *mtd, 573 struct mtd_write_req __user *argp) 574 { 575 struct mtd_write_req req; 576 struct mtd_oob_ops ops; 577 void __user *usr_data, *usr_oob; 578 int ret; 579 580 if (copy_from_user(&req, argp, sizeof(req)) || 581 !access_ok(VERIFY_READ, req.usr_data, req.len) || 582 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) 583 return -EFAULT; 584 if (!mtd->write_oob) 585 return -EOPNOTSUPP; 586 587 ops.mode = req.mode; 588 ops.len = (size_t)req.len; 589 ops.ooblen = (size_t)req.ooblen; 590 ops.ooboffs = 0; 591 592 usr_data = (void __user *)(uintptr_t)req.usr_data; 593 usr_oob = (void __user *)(uintptr_t)req.usr_oob; 594 595 if (req.usr_data) { 596 ops.datbuf = memdup_user(usr_data, ops.len); 597 if (IS_ERR(ops.datbuf)) 598 return PTR_ERR(ops.datbuf); 599 } else { 600 ops.datbuf = NULL; 601 } 602 603 if (req.usr_oob) { 604 ops.oobbuf = memdup_user(usr_oob, ops.ooblen); 605 if (IS_ERR(ops.oobbuf)) { 606 kfree(ops.datbuf); 607 return PTR_ERR(ops.oobbuf); 608 } 609 } else { 610 ops.oobbuf = NULL; 611 } 612 613 ret = mtd_write_oob(mtd, (loff_t)req.start, &ops); 614 615 kfree(ops.datbuf); 616 kfree(ops.oobbuf); 617 618 return ret; 619 } 620 621 static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) 622 { 623 struct mtd_file_info *mfi = file->private_data; 624 struct mtd_info *mtd = mfi->mtd; 625 void __user *argp = (void __user *)arg; 626 int ret = 0; 627 u_long size; 628 struct mtd_info_user info; 629 630 pr_debug("MTD_ioctl\n"); 631 632 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; 633 if (cmd & IOC_IN) { 634 if (!access_ok(VERIFY_READ, argp, size)) 635 return -EFAULT; 636 } 637 if (cmd & IOC_OUT) { 638 if (!access_ok(VERIFY_WRITE, argp, size)) 639 return -EFAULT; 640 } 641 642 switch (cmd) { 643 case MEMGETREGIONCOUNT: 644 if (copy_to_user(argp, &(mtd->numeraseregions), 
				 sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		/* Caller passes the region index in; we fill in the rest. */
		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		/* Export the basic device geometry to user space. */
		memset(&info, 0, sizeof(info));
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below field is obsolete */
		info.padding	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if(!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			/* 64-bit and 32-bit variants differ only in the
			   layout of the user-space argument struct. */
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd_erase(mtd, erase);
			if (!ret) {
				/* Sleep until the erase callback fires. */
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->is_locked)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			/* block_isbad's 0/1/negative result IS the answer */
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd_get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_FILE_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			/* A non-negative ret is the byte count of otp_info
			   records retrieved by the driver. */
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd->ecclayout, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fallthrough - raw mode selected, nothing to do */

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		ret = mtdchar_blkpg_ioctl(mtd,
		      (struct blkpg_ioctl_arg __user *)arg);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

/* ioctl entry point: serialize everything on the global mtd_mutex. */
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/* 32-bit layout of struct mtd_oob_buf for the compat ioctl path. */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

/*
 * compat ioctl: translate the two OOB commands whose struct layout
 * differs between 32- and 64-bit userland; pass everything else on.
 */
static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}
1085 1086 case MEMREADOOB32: 1087 { 1088 struct mtd_oob_buf32 buf; 1089 struct mtd_oob_buf32 __user *buf_user = argp; 1090 1091 /* NOTE: writes return length to buf->start */ 1092 if (copy_from_user(&buf, argp, sizeof(buf))) 1093 ret = -EFAULT; 1094 else 1095 ret = mtdchar_readoob(file, mtd, buf.start, 1096 buf.length, compat_ptr(buf.ptr), 1097 &buf_user->start); 1098 break; 1099 } 1100 default: 1101 ret = mtdchar_ioctl(file, cmd, (unsigned long)argp); 1102 } 1103 1104 mutex_unlock(&mtd_mutex); 1105 1106 return ret; 1107 } 1108 1109 #endif /* CONFIG_COMPAT */ 1110 1111 /* 1112 * try to determine where a shared mapping can be made 1113 * - only supported for NOMMU at the moment (MMU can't doesn't copy private 1114 * mappings) 1115 */ 1116 #ifndef CONFIG_MMU 1117 static unsigned long mtdchar_get_unmapped_area(struct file *file, 1118 unsigned long addr, 1119 unsigned long len, 1120 unsigned long pgoff, 1121 unsigned long flags) 1122 { 1123 struct mtd_file_info *mfi = file->private_data; 1124 struct mtd_info *mtd = mfi->mtd; 1125 1126 if (mtd->get_unmapped_area) { 1127 unsigned long offset; 1128 1129 if (addr != 0) 1130 return (unsigned long) -EINVAL; 1131 1132 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT)) 1133 return (unsigned long) -EINVAL; 1134 1135 offset = pgoff << PAGE_SHIFT; 1136 if (offset > mtd->size - len) 1137 return (unsigned long) -EINVAL; 1138 1139 return mtd_get_unmapped_area(mtd, len, offset, flags); 1140 } 1141 1142 /* can't map directly */ 1143 return (unsigned long) -ENOSYS; 1144 } 1145 #endif 1146 1147 /* 1148 * set up a mapping for shared memory segments 1149 */ 1150 static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) 1151 { 1152 #ifdef CONFIG_MMU 1153 struct mtd_file_info *mfi = file->private_data; 1154 struct mtd_info *mtd = mfi->mtd; 1155 struct map_info *map = mtd->priv; 1156 unsigned long start; 1157 unsigned long off; 1158 u32 len; 1159 1160 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { 1161 off = 
vma->vm_pgoff << PAGE_SHIFT; 1162 start = map->phys; 1163 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); 1164 start &= PAGE_MASK; 1165 if ((vma->vm_end - vma->vm_start + off) > len) 1166 return -EINVAL; 1167 1168 off += start; 1169 vma->vm_pgoff = off >> PAGE_SHIFT; 1170 vma->vm_flags |= VM_IO | VM_RESERVED; 1171 1172 #ifdef pgprot_noncached 1173 if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) 1174 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1175 #endif 1176 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1177 vma->vm_end - vma->vm_start, 1178 vma->vm_page_prot)) 1179 return -EAGAIN; 1180 1181 return 0; 1182 } 1183 return -ENOSYS; 1184 #else 1185 return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS; 1186 #endif 1187 } 1188 1189 static const struct file_operations mtd_fops = { 1190 .owner = THIS_MODULE, 1191 .llseek = mtdchar_lseek, 1192 .read = mtdchar_read, 1193 .write = mtdchar_write, 1194 .unlocked_ioctl = mtdchar_unlocked_ioctl, 1195 #ifdef CONFIG_COMPAT 1196 .compat_ioctl = mtdchar_compat_ioctl, 1197 #endif 1198 .open = mtdchar_open, 1199 .release = mtdchar_close, 1200 .mmap = mtdchar_mmap, 1201 #ifndef CONFIG_MMU 1202 .get_unmapped_area = mtdchar_get_unmapped_area, 1203 #endif 1204 }; 1205 1206 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, 1207 int flags, const char *dev_name, void *data) 1208 { 1209 return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC); 1210 } 1211 1212 static struct file_system_type mtd_inodefs_type = { 1213 .name = "mtd_inodefs", 1214 .mount = mtd_inodefs_mount, 1215 .kill_sb = kill_anon_super, 1216 }; 1217 1218 static void mtdchar_notify_add(struct mtd_info *mtd) 1219 { 1220 } 1221 1222 static void mtdchar_notify_remove(struct mtd_info *mtd) 1223 { 1224 struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index); 1225 1226 if (mtd_ino) { 1227 /* Destroy the inode if it exists */ 1228 clear_nlink(mtd_ino); 1229 iput(mtd_ino); 1230 } 1231 } 1232 1233 
static struct mtd_notifier mtdchar_notifier = { 1234 .add = mtdchar_notify_add, 1235 .remove = mtdchar_notify_remove, 1236 }; 1237 1238 static int __init init_mtdchar(void) 1239 { 1240 int ret; 1241 1242 ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, 1243 "mtd", &mtd_fops); 1244 if (ret < 0) { 1245 pr_notice("Can't allocate major number %d for " 1246 "Memory Technology Devices.\n", MTD_CHAR_MAJOR); 1247 return ret; 1248 } 1249 1250 ret = register_filesystem(&mtd_inodefs_type); 1251 if (ret) { 1252 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); 1253 goto err_unregister_chdev; 1254 } 1255 1256 mtd_inode_mnt = kern_mount(&mtd_inodefs_type); 1257 if (IS_ERR(mtd_inode_mnt)) { 1258 ret = PTR_ERR(mtd_inode_mnt); 1259 pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret); 1260 goto err_unregister_filesystem; 1261 } 1262 register_mtd_user(&mtdchar_notifier); 1263 1264 return ret; 1265 1266 err_unregister_filesystem: 1267 unregister_filesystem(&mtd_inodefs_type); 1268 err_unregister_chdev: 1269 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1270 return ret; 1271 } 1272 1273 static void __exit cleanup_mtdchar(void) 1274 { 1275 unregister_mtd_user(&mtdchar_notifier); 1276 kern_unmount(mtd_inode_mnt); 1277 unregister_filesystem(&mtd_inodefs_type); 1278 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1279 } 1280 1281 module_init(init_mtdchar); 1282 module_exit(cleanup_mtdchar); 1283 1284 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); 1285 1286 MODULE_LICENSE("GPL"); 1287 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 1288 MODULE_DESCRIPTION("Direct character-device access to MTD devices"); 1289 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); 1290