// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is low on memory or memory is
 * highly fragmented, at the cost of reduced transfer performance due
 * to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

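/*
 * Illustrative sketch (not part of this driver): the chunking below is
 * invisible to userspace. A plain read() on a hypothetical /dev/mtd0
 * node returns the requested data (or a short count at the end of the
 * device), even though the kernel may have bounced the transfer through
 * several smaller kmalloc'd buffers:
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 */
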
static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must be
		 * aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

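/*
 * Illustrative sketch (not part of this driver): because of the
 * partial-write semantics above, a robust userspace writer loops on
 * short writes; an error such as ENOSPC is only reported once nothing
 * at all could be written. Assumes a hypothetical /dev/mtd0 node:
 *
 *	while (towrite) {
 *		ssize_t n = write(fd, p, towrite);
 *		if (n <= 0)
 *			break;	// errno (e.g. ENOSPC) set by the driver
 *		p += n;
 *		towrite -= n;
 *	}
 */
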
/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}

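/*
 * Illustrative sketch (not part of this driver): adding a partition at
 * runtime through the BLKPG ioctl. The name, offset and size below are
 * hypothetical; the caller needs CAP_SYS_ADMIN and must operate on the
 * master device:
 *
 *	struct blkpg_partition part = {
 *		.start   = 0x100000,	// byte offset into the master device
 *		.length  = 0x100000,	// partition size in bytes
 *		.devname = "example",
 *	};
 *	struct blkpg_ioctl_arg blk = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(part),
 *		.data    = &part,
 *	};
 *	ioctl(fd, BLKPG, &blk);
 */
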
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}

static int mtdchar_write_ioctl(struct mtd_info *mtd,
			       struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}

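/*
 * Illustrative sketch (not part of this driver): a combined data+OOB
 * write of one page through the MEMWRITE ioctl. Buffer contents, the
 * OOB length and the target offset are hypothetical:
 *
 *	struct mtd_write_req req = {
 *		.start    = 0,			// page-aligned flash offset
 *		.len      = info.writesize,	// one page of data
 *		.ooblen   = 16,			// OOB bytes for that page
 *		.usr_data = (uintptr_t)data,
 *		.usr_oob  = (uintptr_t)oob,
 *		.mode     = MTD_OPS_PLACE_OOB,
 *	};
 *	ioctl(fd, MEMWRITE, &req);
 */
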
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
						   sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
						   sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

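	/*
	 * Illustrative sketch (not part of this driver): erasing the first
	 * eraseblock from userspace with the 32-bit variant; the values are
	 * hypothetical and must be eraseblock-aligned:
	 *
	 *	struct erase_info_user ei = {
	 *		.start  = 0,
	 *		.length = info.erasesize,
	 *	};
	 *	ioctl(fd, MEMERASE, &ei);
	 */
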
	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       (void __user *)(uintptr_t)buf.usr_ptr,
					       &buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      (void __user *)(uintptr_t)buf.usr_ptr,
					      &buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
					  (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

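	/*
	 * Illustrative sketch (not part of this driver): switching the open
	 * file into user-OTP mode and enumerating the OTP regions; the
	 * array size is hypothetical:
	 *
	 *	int mode = MTD_OTP_USER;
	 *	struct otp_info regions[16];
	 *
	 *	ioctl(fd, OTPSELECT, &mode);
	 *	ioctl(fd, OTPGETREGIONINFO, regions);
	 */
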
	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* mtdchar_ioctl */

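/*
 * All ioctl paths below funnel through mtdchar_ioctl() while holding the
 * master device's chrdev_lock, so ioctls on a given master device (and
 * its partitions) are serialized against each other.
 */
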
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
					       buf.length, compat_ptr(buf.ptr),
					       &buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
					      buf.length, compat_ptr(buf.ptr),
					      &buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);