// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

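/*
 * The minor-number encoding used by mtdchar_open() above pairs each MTD
 * device with two character nodes: minor 2*N is the read-write node and
 * minor 2*N + 1 is the read-only one, which is why devnum is derived as
 * minor >> 1 and FMODE_WRITE is rejected when the low minor bit is set.
 *
 * A minimal userspace sketch (illustrative only, not part of this driver;
 * device names assume the usual udev naming):
 *
 *	int fd   = open("/dev/mtd0",   O_RDWR);    // minor 0, writable node
 *	int fdro = open("/dev/mtd0ro", O_RDONLY);  // minor 1, read-only node
 *	// open("/dev/mtd0ro", O_RDWR) fails with EACCES
 */
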
/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

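/*
 * Illustrative userspace sketch (not part of this driver): a plain read()
 * or pread() on the character device goes through mtdchar_read() above, and
 * data is still returned when correctable bitflips or ECC errors were seen,
 * per the comment in the loop. On devices with OOB (NAND), switching the
 * open file into raw mode with MTDFILEMODE makes subsequent reads bypass ECC:
 *
 *	char page[2048];			// assumes a 2KiB page device
 *	pread(fd, page, sizeof(page), 0);	// normal, ECC-corrected read
 *	ioctl(fd, MTDFILEMODE, MTD_FILE_MODE_RAW);
 *	pread(fd, page, sizeof(page), 0);	// raw read, no ECC applied
 */
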
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

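/*
 * Illustrative userspace sketch (not part of this driver): OTPSELECT, which
 * lands in otp_select_filemode() above via the ioctl handler, switches
 * subsequent read()/write() calls on the open file over to the
 * one-time-programmable area. The mode is passed by pointer as an int.
 *
 *	int mode = MTD_OTP_USER;	// or MTD_OTP_FACTORY / MTD_OTP_OFF
 *	ioctl(fd, OTPSELECT, &mode);	// reads now hit the user OTP region
 *	pread(fd, buf, len, 0);
 */
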
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

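/*
 * Illustrative userspace sketch (not part of this driver): MEMREADOOB and
 * MEMWRITEOOB reach mtdchar_readoob()/mtdchar_writeoob() above. The start
 * offset is split into a page address and an offset within that page's OOB
 * area, and at most 4096 bytes may be transferred per call.
 *
 *	unsigned char oob[64];			// assumes a 64-byte OOB area
 *	struct mtd_oob_buf req = {
 *		.start  = 0,			// OOB of the page at offset 0
 *		.length = sizeof(oob),
 *		.ptr    = oob,
 *	};
 *	ioctl(fd, MEMREADOOB, &req);	// req.start holds the OOB byte count on return
 */
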
/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master MTD device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}

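/*
 * Illustrative userspace sketch (not part of this driver): BLKPG on an MTD
 * master device, handled by mtdchar_blkpg_ioctl() above, carves out a new
 * partition at runtime. It requires CAP_SYS_ADMIN; the partition name shown
 * here is hypothetical.
 *
 *	struct blkpg_partition part = {
 *		.start   = 0,			// byte offset into the master device
 *		.length  = 0x20000,		// partition size in bytes
 *		.devname = "runtime-part",
 *	};
 *	struct blkpg_ioctl_arg blk = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(part),
 *		.data    = &part,
 *	};
 *	ioctl(fd, BLKPG, &blk);
 */
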
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}

static noinline_for_stack int
mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;
	u64 end;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (check_add_overflow(req.start, req.len, &end) || end > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}

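/*
 * Illustrative userspace sketch (not part of this driver): MEMWRITE, handled
 * by mtdchar_write_ioctl() above, writes data and OOB in one request. Buffer
 * addresses are carried as 64-bit integers in struct mtd_write_req.
 *
 *	unsigned char data[2048], oob[64];	// assumes 2KiB pages, 64-byte OOB
 *	struct mtd_write_req req = {
 *		.start    = 0,			// page-aligned device offset
 *		.len      = sizeof(data),
 *		.ooblen   = sizeof(oob),
 *		.usr_data = (uintptr_t)data,
 *		.usr_oob  = (uintptr_t)oob,
 *		.mode     = MTD_OPS_AUTO_OOB,
 *	};
 *	ioctl(fd, MEMWRITE, &req);
 */
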
static noinline_for_stack int
mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_read_req req;
	void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	size_t orig_len, orig_ooblen;
	int ret = 0;
	u64 end;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	orig_len = req.len;
	orig_ooblen = req.ooblen;

	usr_data = (void __user *)(uintptr_t)req.usr_data;
	usr_oob = (void __user *)(uintptr_t)req.usr_oob;

	if (!master->_read_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.ecc_stats.uncorrectable_errors = 0;
	req.ecc_stats.corrected_bitflips = 0;
	req.ecc_stats.max_bitflips = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) {
		ret = -EINVAL;
		goto out;
	}

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_req_stats stats;
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
			.stats = &stats,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized reads so that the
		 * read ends on an eraseblock boundary. This is necessary in
		 * order to prevent OOB data for some pages from being
		 * duplicated in the output of non-page-aligned reads requiring
		 * multiple mtd_read_oob() calls to be completed.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);

		req.ecc_stats.uncorrectable_errors +=
			stats.uncorrectable_errors;
		req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
		req.ecc_stats.max_bitflips =
			max(req.ecc_stats.max_bitflips, stats.max_bitflips);

		if (ret && !mtd_is_bitflip_or_eccerr(ret))
			break;

		if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
		    copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
			ret = -EFAULT;
			break;
		}

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	/*
	 * As multiple iterations of the above loop (and therefore multiple
	 * mtd_read_oob() calls) may be necessary to complete the read request,
	 * adjust the final return code to ensure it accounts for all detected
	 * ECC errors.
	 */
	if (!ret || mtd_is_bitflip(ret)) {
		if (req.ecc_stats.uncorrectable_errors > 0)
			ret = -EBADMSG;
		else if (req.ecc_stats.corrected_bitflips > 0)
			ret = -EUCLEAN;
	}

out:
	req.len = orig_len - req.len;
	req.ooblen = orig_ooblen - req.ooblen;

	if (copy_to_user(argp, &req, sizeof(req)))
		ret = -EFAULT;

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}

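/*
 * Illustrative userspace sketch (not part of this driver): MEMREAD, handled
 * by mtdchar_read_ioctl() above, is the read-side counterpart of MEMWRITE
 * and additionally reports ECC statistics back in the request structure.
 *
 *	unsigned char data[2048];		// assumes a 2KiB page device
 *	struct mtd_read_req req = {
 *		.start    = 0,
 *		.len      = sizeof(data),
 *		.usr_data = (uintptr_t)data,
 *		.mode     = MTD_OPS_AUTO_OOB,
 *	};
 *	ioctl(fd, MEMREAD, &req);	// errno EUCLEAN/EBADMSG flags bitflips/ECC errors
 *	printf("corrected bitflips: %u\n", req.ecc_stats.corrected_bitflips);
 */
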
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode: "dangerous" commands require the file to have
	 * been opened with write permission.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMREAD:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

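	/*
	 * Illustrative userspace sketch (not part of this driver): the usual
	 * pairing of the two cases above is to query the geometry with
	 * MEMGETINFO and then erase whole eraseblocks with MEMERASE before
	 * rewriting them.
	 *
	 *	struct mtd_info_user info;
	 *	struct erase_info_user ei;
	 *
	 *	ioctl(fd, MEMGETINFO, &info);
	 *	ei.start = 0;			// eraseblock-aligned offset
	 *	ei.length = info.erasesize;	// one eraseblock
	 *	ioctl(fd, MEMERASE, &ei);	// needs the node opened for writing
	 */
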
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
			(struct mtd_write_req __user *)arg);
		break;
	}

	case MEMREAD:
	{
		ret = mtdchar_read_ioctl(mtd,
			(struct mtd_read_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;
		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* memory_ioctl */

static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't/doesn't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner = THIS_MODULE,
	.llseek = mtdchar_lseek,
	.read = mtdchar_read,
	.write = mtdchar_write,
	.unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mtdchar_compat_ioctl,
#endif
	.open = mtdchar_open,
	.release = mtdchar_close,
	.mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);