// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

static DEFINE_MUTEX(mtd_mutex);

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	mutex_unlock(&mtd_mutex);
	return 0;

out1:
	put_mtd_device(mtd);
out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */
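/*
 * For reference (illustrative only, not part of the driver): each MTD
 * device N is exposed through two character device minors, 2*N
 * (read-write, typically /dev/mtdN) and 2*N + 1 (read-only, typically
 * /dev/mtdNro) -- hence the "minor >> 1" and "minor & 1" logic in
 * mtdchar_open() above. A minimal userspace sketch, assuming /dev/mtd0
 * and /dev/mtd0ro exist:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/mtd0", O_RDWR);	// RW node
 *
 *		// open("/dev/mtd0ro", O_RDWR) would fail with EACCES,
 *		// since the odd minor is the read-only view
 *		if (fd < 0)
 *			perror("open");
 *		return 0;
 *	}
 */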
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */
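/*
 * Illustrative only (not compiled into the driver): because
 * mtdchar_read() treats -EUCLEAN/-EBADMSG as success and still copies
 * the data out, a plain read() loop in userspace also receives data
 * from areas with ECC errors. A minimal dump sketch, assuming 'fd' is
 * open on an mtd char device and 'out' is some FILE * destination:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, out);
 *	if (n < 0)
 *		perror("read");		// a real I/O error, not a bitflip
 */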
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
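/*
 * Hedged userspace sketch of the simplest "get device parameters"
 * ioctl, MEMGETINFO (struct mtd_info_user comes from <mtd/mtd-user.h>;
 * 'fd' is assumed open on an mtd char device):
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	struct mtd_info_user info;
 *
 *	if (ioctl(fd, MEMGETINFO, &info) == 0)
 *		printf("size=%u erasesize=%u writesize=%u oobsize=%u\n",
 *		       info.size, info.erasesize, info.writesize,
 *		       info.oobsize);
 *	else
 *		perror("MEMGETINFO");
 */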
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}
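/*
 * Illustrative MEMREADOOB usage (not part of the driver). Note that the
 * handlers above mask 'start' down to a page boundary and derive the
 * OOB offset from the low bits, so 'start' addresses the page, not the
 * OOB area itself. 'page', 'oobbuf' and the 'info' from MEMGETINFO are
 * assumptions of this sketch:
 *
 *	struct mtd_oob_buf oob = {
 *		.start  = page * info.writesize,  // page: some page index
 *		.length = info.oobsize,
 *		.ptr    = oobbuf,                 // caller-supplied buffer
 *	};
 *
 *	if (ioctl(fd, MEMREADOOB, &oob) < 0)
 *		perror("MEMREADOOB");
 *	// on success, the byte count read is written back to oob.start
 */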
/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
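/*
 * Hedged sketch of adding a partition at runtime through BLKPG (the
 * structures come from <linux/blkpg.h>; the caller needs CAP_SYS_ADMIN
 * and, per the handler above, must use the master device, not a
 * partition). The name and sizes here are hypothetical:
 *
 *	struct blkpg_partition part = {
 *		.start   = 0,
 *		.length  = 1024 * 1024,		// example: 1 MiB, in bytes
 *		.pno     = 0,
 *		.devname = "example",		// hypothetical name
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(part),
 *		.data    = &part,
 *	};
 *
 *	if (ioctl(fd, BLKPG, &arg) < 0)
 *		perror("BLKPG");
 */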
static int mtdchar_write_ioctl(struct mtd_info *mtd,
			       struct mtd_write_req __user *argp)
{
	struct mtd_write_req req;
	struct mtd_oob_ops ops = {};
	const void __user *usr_data, *usr_oob;
	int ret;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!mtd->_write_oob)
		return -EOPNOTSUPP;

	ops.mode = req.mode;
	ops.len = (size_t)req.len;
	ops.ooblen = (size_t)req.ooblen;
	ops.ooboffs = 0;

	if (usr_data) {
		ops.datbuf = memdup_user(usr_data, ops.len);
		if (IS_ERR(ops.datbuf))
			return PTR_ERR(ops.datbuf);
	} else {
		ops.datbuf = NULL;
	}

	if (usr_oob) {
		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
		if (IS_ERR(ops.oobbuf)) {
			kfree(ops.datbuf);
			return PTR_ERR(ops.oobbuf);
		}
	} else {
		ops.oobbuf = NULL;
	}

	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);

	kfree(ops.datbuf);
	kfree(ops.oobbuf);

	return ret;
}
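/*
 * Illustrative MEMWRITE usage matching mtdchar_write_ioctl() above
 * (struct mtd_write_req is in <mtd/mtd-user.h>; the data and OOB
 * pointers are passed as __u64 values). 'databuf' and the 'info' from
 * MEMGETINFO are assumptions of this sketch:
 *
 *	struct mtd_write_req req = {
 *		.start    = 0,
 *		.len      = info.writesize,
 *		.ooblen   = 0,
 *		.usr_data = (uint64_t)(uintptr_t)databuf, // one page of data
 *		.usr_oob  = 0,                            // no OOB data
 *		.mode     = MTD_OPS_PLACE_OOB,
 *	};
 *
 *	if (ioctl(fd, MEMWRITE, &req) < 0)
 *		perror("MEMWRITE");
 */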
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtdchar_ioctl */
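/*
 * Hedged userspace example of erasing one eraseblock via MEMERASE (the
 * 32-bit variant; MEMERASE64 takes a struct erase_info_user64 for
 * devices or offsets beyond 4 GiB). 'block' and the 'info' from
 * MEMGETINFO are assumptions of this sketch:
 *
 *	struct erase_info_user ei = {
 *		.start  = block * info.erasesize, // block: some block index
 *		.length = info.erasesize,
 *	};
 *
 *	if (ioctl(fd, MEMERASE, &ei) < 0)
 *		perror("MEMERASE");
 */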
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (an MMU kernel doesn't need
 *   this, and NOMMU can't copy private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif
/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);