// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

#define MTD_DEVICE_ATTR_RO(name) \
	static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
	static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);

static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partname);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (master->dbg.partid)
		debugfs_create_file("partid", 0400, root, master,
				    &mtd_partid_debug_fops);

	if (master->dbg.partname)
		debugfs_create_file("partname", 0400, root, master,
				    &mtd_partname_debug_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
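
/*
 * Usage sketch (illustrative only; 'offs' and the locals below are
 * placeholder names, not definitions made in this file): find every wunit
 * paired with the wunit containing absolute offset 'offs', following the
 * pattern described in the mtd_wunit_to_pairing_info() documentation above.
 *
 *	struct mtd_pairing_info info;
 *	int i, wunit;
 *
 *	mtd_wunit_to_pairing_info(mtd, mtd_offset_to_wunit(mtd, offs), &info);
 *	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *		info.group = i;
 *		wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *		...
 *	}
 */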

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure: -ENODEV
 * if the requested device does not appear to be present in the list,
 * or -EBUSY if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	/* just reuse the compatible as name */
	config.name = compatible;
	config.id = NVMEM_DEVID_NONE;
	config.owner = THIS_MODULE;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
					void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
					void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				return PTR_ERR(nvmem);
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	if (mtd->otp_user_nvmem)
		nvmem_unregister(mtd->otp_user_nvmem);
	return err;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	ret = mtd_otp_nvmem_add(mtd);

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	if (master->otp_user_nvmem)
		nvmem_unregister(master->otp_user_nvmem);

	if (master->otp_factory_nvmem)
		nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
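
/*
 * Typical registration pattern, sketched for illustration. The driver, its
 * probe/remove paths and the "cmdlinepart" probe list below are assumptions
 * made for the example, not definitions found in this file.
 *
 * In the probe path, once the mtd_info fields have been filled in:
 *
 *	static const char * const probe_types[] = { "cmdlinepart", NULL };
 *
 *	ret = mtd_device_parse_register(mtd, probe_types, NULL, NULL, 0);
 *	if (ret)
 *		return ret;
 *
 * and in the remove path:
 *
 *	WARN_ON(mtd_device_unregister(mtd));
 */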

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
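
/*
 * Illustrative sketch (the device number 0 and the error handling style are
 * example assumptions): a handle returned by get_mtd_device() must be
 * dropped again with put_mtd_device() once the user is done with it.
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */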

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
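
/*
 * Erase usage sketch (illustrative only; 'ofs' is a placeholder for an
 * eraseblock-aligned offset supplied by the caller): since the erase path
 * above is synchronous, the result can be checked directly and
 * instr.fail_addr inspected on error.
 *
 *	struct erase_info instr = {
 *		.addr = ofs,
 *		.len = mtd->erasesize,
 *	};
 *
 *	err = mtd_erase(mtd, &instr);
 *	if (err)
 *		pr_err("erase failed at %llx\n",
 *		       (unsigned long long)instr.fail_addr);
 */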

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
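
/*
 * Read/write usage sketch (illustrative only; 'ofs', 'buf' and 'len' are
 * placeholders provided by the caller). -EUCLEAN from mtd_read() means the
 * data was corrected but the bitflip threshold was reached, so callers
 * typically treat it as a success with a hint that the block should be
 * scrubbed:
 *
 *	size_t retlen;
 *
 *	err = mtd_read(mtd, ofs, len, &retlen, buf);
 *	if (err && !mtd_is_bitflip(err))
 *		return err;
 *
 *	err = mtd_write(mtd, ofs, len, &retlen, buf);
 */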

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it is
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
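
/*
 * OOB access sketch (illustrative only; 'ofs', 'databuf' and 'oobbuf' are
 * placeholder buffers owned by the caller): a single mtd_oob_ops describes
 * both the main-area and the OOB part of the transfer, and the retlen/
 * oobretlen fields report how much of each was actually transferred.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode = MTD_OPS_AUTO_OOB,
 *		.datbuf = databuf,
 *		.len = mtd->writesize,
 *		.oobbuf = oobbuf,
 *		.ooblen = mtd->oobavail,
 *	};
 *
 *	err = mtd_read_oob(mtd, ofs, &ops);
 */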

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single ECC
 *	     chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
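
/*
 * Layout walk sketch (illustrative only), following the iteration pattern
 * described in the two helpers above: request section 0, 1, 2, ... until
 * -ERANGE signals that there are no more ECC sections.
 *
 *	struct mtd_oob_region oobecc;
 *	int section = 0, err;
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section++, &oobecc)))
 *		pr_debug("ECC bytes at OOB offset %u, length %u\n",
 *			 oobecc.offset, oobecc.length);
 *	if (err != -ERANGE)
 *		return err;
 */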

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first free byte to retrieve
 * @nbytes: number of free bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first free byte to set
 * @nbytes: number of free bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
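
/*
 * User-OTP read sketch (illustrative only; the single-region assumption and
 * the 'buf' destination are example simplifications): query the region
 * layout first, then read from the reported start offset.
 *
 *	struct otp_info info;
 *	size_t retlen;
 *
 *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, &info);
 *	if (err || retlen < sizeof(info))
 *		return err ? err : -ENODATA;
 *
 *	err = mtd_read_user_prot_reg(mtd, info.start, info.length,
 *				     &retlen, buf);
 */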
memory and 2062 * must return -ENOSPC. 2063 */ 2064 return (*retlen) ? 0 : -ENOSPC; 2065 } 2066 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); 2067 2068 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) 2069 { 2070 struct mtd_info *master = mtd_get_master(mtd); 2071 2072 if (!master->_lock_user_prot_reg) 2073 return -EOPNOTSUPP; 2074 if (!len) 2075 return 0; 2076 return master->_lock_user_prot_reg(master, from, len); 2077 } 2078 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); 2079 2080 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) 2081 { 2082 struct mtd_info *master = mtd_get_master(mtd); 2083 2084 if (!master->_erase_user_prot_reg) 2085 return -EOPNOTSUPP; 2086 if (!len) 2087 return 0; 2088 return master->_erase_user_prot_reg(master, from, len); 2089 } 2090 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg); 2091 2092 /* Chip-supported device locking */ 2093 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2094 { 2095 struct mtd_info *master = mtd_get_master(mtd); 2096 2097 if (!master->_lock) 2098 return -EOPNOTSUPP; 2099 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 2100 return -EINVAL; 2101 if (!len) 2102 return 0; 2103 2104 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { 2105 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; 2106 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; 2107 } 2108 2109 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); 2110 } 2111 EXPORT_SYMBOL_GPL(mtd_lock); 2112 2113 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2114 { 2115 struct mtd_info *master = mtd_get_master(mtd); 2116 2117 if (!master->_unlock) 2118 return -EOPNOTSUPP; 2119 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 2120 return -EINVAL; 2121 if (!len) 2122 return 0; 2123 2124 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { 2125 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; 2126 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; 2127 } 2128 2129 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); 2130 } 2131 EXPORT_SYMBOL_GPL(mtd_unlock); 2132 2133 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2134 { 2135 struct mtd_info *master = mtd_get_master(mtd); 2136 2137 if (!master->_is_locked) 2138 return -EOPNOTSUPP; 2139 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) 2140 return -EINVAL; 2141 if (!len) 2142 return 0; 2143 2144 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { 2145 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; 2146 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; 2147 } 2148 2149 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); 2150 } 2151 EXPORT_SYMBOL_GPL(mtd_is_locked); 2152 2153 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) 2154 { 2155 struct mtd_info *master = mtd_get_master(mtd); 2156 2157 if (ofs < 0 || ofs >= mtd->size) 2158 return -EINVAL; 2159 if (!master->_block_isreserved) 2160 return 0; 2161 2162 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) 2163 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; 2164 2165 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); 2166 } 2167 EXPORT_SYMBOL_GPL(mtd_block_isreserved); 2168 2169 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) 2170 { 2171 struct mtd_info *master = mtd_get_master(mtd); 2172 2173 if (ofs < 0 || ofs >= mtd->size) 2174 return -EINVAL; 2175 if (!master->_block_isbad) 2176 return 0; 2177 2178 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) 2179 ofs = (loff_t)mtd_div_by_eb(ofs, 
mtd) * master->erasesize; 2180 2181 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); 2182 } 2183 EXPORT_SYMBOL_GPL(mtd_block_isbad); 2184 2185 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) 2186 { 2187 struct mtd_info *master = mtd_get_master(mtd); 2188 int ret; 2189 2190 if (!master->_block_markbad) 2191 return -EOPNOTSUPP; 2192 if (ofs < 0 || ofs >= mtd->size) 2193 return -EINVAL; 2194 if (!(mtd->flags & MTD_WRITEABLE)) 2195 return -EROFS; 2196 2197 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) 2198 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; 2199 2200 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); 2201 if (ret) 2202 return ret; 2203 2204 while (mtd->parent) { 2205 mtd->ecc_stats.badblocks++; 2206 mtd = mtd->parent; 2207 } 2208 2209 return 0; 2210 } 2211 EXPORT_SYMBOL_GPL(mtd_block_markbad); 2212 2213 /* 2214 * default_mtd_writev - the default writev method 2215 * @mtd: mtd device description object pointer 2216 * @vecs: the vectors to write 2217 * @count: count of vectors in @vecs 2218 * @to: the MTD device offset to write to 2219 * @retlen: on exit contains the count of bytes written to the MTD device. 2220 * 2221 * This function returns zero in case of success and a negative error code in 2222 * case of failure. 2223 */ 2224 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 2225 unsigned long count, loff_t to, size_t *retlen) 2226 { 2227 unsigned long i; 2228 size_t totlen = 0, thislen; 2229 int ret = 0; 2230 2231 for (i = 0; i < count; i++) { 2232 if (!vecs[i].iov_len) 2233 continue; 2234 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, 2235 vecs[i].iov_base); 2236 totlen += thislen; 2237 if (ret || thislen != vecs[i].iov_len) 2238 break; 2239 to += vecs[i].iov_len; 2240 } 2241 *retlen = totlen; 2242 return ret; 2243 } 2244 2245 /* 2246 * mtd_writev - the vector-based MTD write method 2247 * @mtd: mtd device description object pointer 2248 * @vecs: the vectors to write 2249 * @count: count of vectors in @vecs 2250 * @to: the MTD device offset to write to 2251 * @retlen: on exit contains the count of bytes written to the MTD device. 2252 * 2253 * This function returns zero in case of success and a negative error code in 2254 * case of failure. 2255 */ 2256 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 2257 unsigned long count, loff_t to, size_t *retlen) 2258 { 2259 struct mtd_info *master = mtd_get_master(mtd); 2260 2261 *retlen = 0; 2262 if (!(mtd->flags & MTD_WRITEABLE)) 2263 return -EROFS; 2264 2265 if (!master->_writev) 2266 return default_mtd_writev(mtd, vecs, count, to, retlen); 2267 2268 return master->_writev(master, vecs, count, 2269 mtd_get_master_ofs(mtd, to), retlen); 2270 } 2271 EXPORT_SYMBOL_GPL(mtd_writev); 2272 2273 /** 2274 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size 2275 * @mtd: mtd device description object pointer 2276 * @size: a pointer to the ideal or maximum size of the allocation, points 2277 * to the actual allocation size on success. 2278 * 2279 * This routine attempts to allocate a contiguous kernel buffer up to 2280 * the specified size, backing off the size of the request exponentially 2281 * until the request succeeds or until the allocation size falls below 2282 * the system page size. This attempts to make sure it does not adversely 2283 * impact system performance, so when allocating more than one page, we 2284 * ask the memory allocator to avoid re-trying, swapping, writing back 2285 * or performing I/O. 
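 *
 * A minimal usage sketch (illustrative only; "mtd" is a placeholder and the
 * caller is assumed to release the buffer with kfree()):
 *
 *	size_t len = mtd->erasesize;
 *	void *buf;
 *
 *	buf = mtd_kmalloc_up_to(mtd, &len);
 *	if (!buf)
 *		return -ENOMEM;
 *	... work on the data in chunks of at most "len" bytes ...
 *	kfree(buf);
 *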
2286 * 2287 * Note, this function also makes sure that the allocated buffer is aligned to 2288 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. 2289 * 2290 * This is called, for example by mtd_{read,write} and jffs2_scan_medium, 2291 * to handle smaller (i.e. degraded) buffer allocations under low- or 2292 * fragmented-memory situations where such reduced allocations, from a 2293 * requested ideal, are allowed. 2294 * 2295 * Returns a pointer to the allocated buffer on success; otherwise, NULL. 2296 */ 2297 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) 2298 { 2299 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY; 2300 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); 2301 void *kbuf; 2302 2303 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE); 2304 2305 while (*size > min_alloc) { 2306 kbuf = kmalloc(*size, flags); 2307 if (kbuf) 2308 return kbuf; 2309 2310 *size >>= 1; 2311 *size = ALIGN(*size, mtd->writesize); 2312 } 2313 2314 /* 2315 * For the last resort allocation allow 'kmalloc()' to do all sorts of 2316 * things (write-back, dropping caches, etc) by using GFP_KERNEL. 2317 */ 2318 return kmalloc(*size, GFP_KERNEL); 2319 } 2320 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); 2321 2322 #ifdef CONFIG_PROC_FS 2323 2324 /*====================================================================*/ 2325 /* Support for /proc/mtd */ 2326 2327 static int mtd_proc_show(struct seq_file *m, void *v) 2328 { 2329 struct mtd_info *mtd; 2330 2331 seq_puts(m, "dev: size erasesize name\n"); 2332 mutex_lock(&mtd_table_mutex); 2333 mtd_for_each_device(mtd) { 2334 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n", 2335 mtd->index, (unsigned long long)mtd->size, 2336 mtd->erasesize, mtd->name); 2337 } 2338 mutex_unlock(&mtd_table_mutex); 2339 return 0; 2340 } 2341 #endif /* CONFIG_PROC_FS */ 2342 2343 /*====================================================================*/ 2344 /* Init code */ 2345 2346 static struct backing_dev_info * __init mtd_bdi_init(const char *name) 2347 { 2348 struct backing_dev_info *bdi; 2349 int ret; 2350 2351 bdi = bdi_alloc(NUMA_NO_NODE); 2352 if (!bdi) 2353 return ERR_PTR(-ENOMEM); 2354 bdi->ra_pages = 0; 2355 bdi->io_pages = 0; 2356 2357 /* 2358 * We put '-0' suffix to the name to get the same name format as we 2359 * used to get. Since this is called only once, we get a unique name. 2360 */ 2361 ret = bdi_register(bdi, "%.28s-0", name); 2362 if (ret) 2363 bdi_put(bdi); 2364 2365 return ret ? 
ERR_PTR(ret) : bdi; 2366 } 2367 2368 static struct proc_dir_entry *proc_mtd; 2369 2370 static int __init init_mtd(void) 2371 { 2372 int ret; 2373 2374 ret = class_register(&mtd_class); 2375 if (ret) 2376 goto err_reg; 2377 2378 mtd_bdi = mtd_bdi_init("mtd"); 2379 if (IS_ERR(mtd_bdi)) { 2380 ret = PTR_ERR(mtd_bdi); 2381 goto err_bdi; 2382 } 2383 2384 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show); 2385 2386 ret = init_mtdchar(); 2387 if (ret) 2388 goto out_procfs; 2389 2390 dfs_dir_mtd = debugfs_create_dir("mtd", NULL); 2391 2392 return 0; 2393 2394 out_procfs: 2395 if (proc_mtd) 2396 remove_proc_entry("mtd", NULL); 2397 bdi_put(mtd_bdi); 2398 err_bdi: 2399 class_unregister(&mtd_class); 2400 err_reg: 2401 pr_err("Error registering mtd class or bdi: %d\n", ret); 2402 return ret; 2403 } 2404 2405 static void __exit cleanup_mtd(void) 2406 { 2407 debugfs_remove_recursive(dfs_dir_mtd); 2408 cleanup_mtdchar(); 2409 if (proc_mtd) 2410 remove_proc_entry("mtd", NULL); 2411 class_unregister(&mtd_class); 2412 bdi_put(mtd_bdi); 2413 idr_destroy(&mtd_idr); 2414 } 2415 2416 module_init(init_mtd); 2417 module_exit(cleanup_mtd); 2418 2419 MODULE_LICENSE("GPL"); 2420 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 2421 MODULE_DESCRIPTION("Core MTD registration and access routines"); 2422
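
/*
 * Illustrative usage sketch (not part of the driver itself): how a
 * hypothetical MTD user might combine a few of the helpers above. The
 * identifiers "mtd", "otp", "hdr", "payload" and the offsets are
 * caller-supplied placeholders, and error handling is abbreviated.
 *
 * Skip eraseblocks already marked bad, then read the start of the user OTP
 * area if the chip provides one:
 *
 *	u8 otp[16];
 *	size_t retlen = 0;
 *	int err;
 *
 *	if (mtd_block_isbad(mtd, 0))
 *		return -EIO;
 *
 *	err = mtd_read_user_prot_reg(mtd, 0, sizeof(otp), &retlen, otp);
 *	if (err == -EOPNOTSUPP)
 *		retlen = 0;
 *	else if (err)
 *		return err;
 *
 * A scattered write through mtd_writev() looks similar; the two kvec
 * entries below point at caller-owned buffers:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr, .iov_len = hdr_len },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	size_t written = 0;
 *
 *	err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), 0, &written);
 */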