// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_oobavail_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
}
static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO,
		   mtd_numeraseregions_show,
		   NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);

}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};

static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);

static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partname);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (mtd->dbg.partid)
		debugfs_create_file("partid", 0400, root, mtd,
				    &mtd_partid_debug_fops);

	if (mtd->dbg.partname)
		debugfs_create_file("partname", 0400, root, mtd,
				    &mtd_partname_debug_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
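
/*
 * Usage sketch (illustrative only, not part of this file's API): given an
 * absolute offset inside an erase block, the pairing helpers above can be
 * combined to locate every wunit paired with that page. The function and
 * variable names below (show_paired_wunits, blkoffs, ...) are made up for
 * the example.
 *
 *	static void show_paired_wunits(struct mtd_info *mtd, loff_t blkoffs,
 *				       loff_t offs)
 *	{
 *		struct mtd_pairing_info info;
 *		int i, wunit;
 *
 *		mtd_wunit_to_pairing_info(mtd, mtd_offset_to_wunit(mtd, offs),
 *					  &info);
 *		for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *			info.group = i;
 *			wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *			if (wunit < 0)
 *				continue;
 *			pr_info("paired wunit %d at offset %llx\n", wunit,
 *				(unsigned long long)mtd_wunit_to_offset(mtd,
 *							blkoffs, wunit));
 *		}
 *	}
 */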

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.no_of_node = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked.
	   Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device does not appear to be
 * present in the list or is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* Try to remove the NVMEM provider */
		if (mtd->nvmem)
			nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
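
/*
 * Typical driver-side usage (sketch only): a flash driver fills in a
 * struct mtd_info and hands it to mtd_device_register()/
 * mtd_device_parse_register() from its probe path, and tears it down with
 * mtd_device_unregister() on remove. The names foo_probe/foo_remove and the
 * fixed partition table below are hypothetical.
 *
 *	static const struct mtd_partition foo_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
 *						GFP_KERNEL);
 *		...
 *		priv->mtd.dev.parent = &pdev->dev;
 *		priv->mtd.name = "foo-flash";
 *		...
 *		return mtd_device_register(&priv->mtd, foo_parts,
 *					   ARRAY_SIZE(foo_parts));
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo *priv = platform_get_drvdata(pdev);
 *
 *		return mtd_device_unregister(&priv->mtd);
 *	}
 */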

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
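
/*
 * Sketch of an MTD 'user' built on register_mtd_user()/unregister_mtd_user()
 * above: the add() callback runs for every MTD device already present and
 * for each one added later, remove() runs on departure. The callback and
 * notifier names are hypothetical.
 *
 *	static void foo_notify_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void foo_notify_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
 *	}
 *
 *	static struct mtd_notifier foo_notifier = {
 *		.add = foo_notify_add,
 *		.remove = foo_notify_remove,
 *	};
 *
 *	register_mtd_user(&foo_notifier);
 *	...
 *	unregister_mtd_user(&foo_notifier);
 */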

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *		       device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
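
/*
 * Usage sketch for the handle and erase helpers above: look an MTD device up
 * by name, erase its first erase block and drop the reference again. Error
 * handling is shortened and the device name "foo-flash" is hypothetical.
 *
 *	struct erase_info ei = { };
 *	struct mtd_info *mtd;
 *	int err;
 *
 *	mtd = get_mtd_device_nm("foo-flash");
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *
 *	ei.addr = 0;
 *	ei.len = mtd->erasesize;
 *	err = mtd_erase(mtd, &ei);
 *	if (err)
 *		pr_warn("erase failed at %llx\n",
 *			(unsigned long long)ei.fail_addr);
 *
 *	put_mtd_device(mtd);
 */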

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
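
/*
 * Read path sketch: mtd_read() copies @len bytes starting at @from into a
 * caller buffer and reports the actual amount in @retlen. -EUCLEAN only
 * means bitflips above the threshold were corrected, so the data is still
 * usable. The buffer size and offset below are arbitrary.
 *
 *	size_t retlen;
 *	u_char *buf = kmalloc(mtd->writesize, GFP_KERNEL);
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	err = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);
 *	if (err && !mtd_is_bitflip(err))
 *		goto out_free;
 *	... use the first retlen bytes of buf ...
 * out_free:
 *	kfree(buf);
 */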

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail -
				    adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
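
/*
 * OOB read sketch: mtd_read_oob() takes a struct mtd_oob_ops describing both
 * the data and the OOB transfer. With MTD_OPS_AUTO_OOB the OOB bytes are
 * taken from the free areas described by the ooblayout. The buffers
 * (databuf, oobbuf) and page_offset below are caller-provided and
 * hypothetical.
 *
 *	struct mtd_oob_ops ops = { };
 *	int err;
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.len = mtd->writesize;
 *	ops.ooblen = mtd->oobavail;
 *	ops.datbuf = databuf;
 *	ops.oobbuf = oobbuf;
 *	err = mtd_read_oob(mtd, page_offset, &ops);
 *	if (!err || mtd_is_bitflip(err))
 *		pr_debug("read %zu data and %zu oob bytes\n",
 *			 ops.retlen, ops.oobretlen);
 */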

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
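
/*
 * Layout walk sketch: callers that need the full ECC (or free) byte map can
 * iterate the helpers above until they return -ERANGE, as the kernel-doc
 * suggests. The pr_debug() reporting is only for illustration.
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
 *		pr_debug("ECC section %d: offset %u, length %u\n",
 *			 section, region.offset, region.length);
 *		section++;
 *	}
 */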

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}

/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
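
/*
 * Extraction sketch: after a raw OOB read, the wrappers above can pull the
 * category of interest out of the OOB buffer. Here the first 8 free ("data")
 * bytes are copied out of oobbuf; oobbuf is assumed to be a caller-provided
 * buffer holding one page worth of OOB data.
 *
 *	u8 dst[8];
 *	int err;
 *
 *	err = mtd_ooblayout_get_databytes(mtd, dst, oobbuf, 0, sizeof(dst));
 *	if (err)
 *		return err;
 */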

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
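
/*
 * OTP usage sketch: query the user OTP regions, then read from the first
 * one. Real callers would size the otp_info array after a first call; the
 * fixed-size array below only keeps the example short.
 *
 *	struct otp_info info[4];
 *	size_t retlen;
 *	u_char data[16];
 *	int err;
 *
 *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *	if (err)
 *		return err;
 *
 *	err = mtd_read_user_prot_reg(mtd, info[0].start, sizeof(data),
 *				     &retlen, data);
 */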

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_erase_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) *

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
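
/*
 * Editorial example (not part of the original file): a minimal sketch showing
 * how a caller could gather two separate buffers into a single MTD write with
 * mtd_writev(), defined above. The function name and buffers are hypothetical;
 * real callers (e.g. JFFS2) build their kvec arrays from their own structures.
 */
static int __maybe_unused mtd_example_gather_write(struct mtd_info *mtd,
						   loff_t to,
						   void *hdr, size_t hdrlen,
						   void *data, size_t datalen)
{
	struct kvec vecs[2];
	size_t retlen;
	int ret;

	vecs[0].iov_base = hdr;
	vecs[0].iov_len = hdrlen;
	vecs[1].iov_base = data;
	vecs[1].iov_len = datalen;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (ret)
		return ret;

	/* Treat a short write as an error, mirroring default_mtd_writev(). */
	return (retlen == hdrlen + datalen) ? 0 : -EIO;
}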

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
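
/*
 * Editorial example (not part of the original file): a minimal sketch of the
 * usage pattern mtd_kmalloc_up_to() (defined above) is meant for. A caller
 * asks for an ideal buffer size, accepts whatever smaller size the helper
 * managed to allocate, and then iterates in chunks of that size. The function
 * name and the chunked-read loop are hypothetical and purely illustrative.
 */
static int __maybe_unused mtd_example_chunked_read(struct mtd_info *mtd,
						   loff_t from, size_t len)
{
	size_t bufsize = len;	/* ideal size; may be reduced by the helper */
	size_t retlen, chunk;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &bufsize);
	if (!buf)
		return -ENOMEM;

	while (len) {
		chunk = min(len, bufsize);
		ret = mtd_read(mtd, from, chunk, &retlen, buf);
		/* Stop on real errors; corrected bitflips are not fatal. */
		if (ret && !mtd_is_bitflip(ret))
			break;
		if (!retlen)
			break;
		/* ... consume retlen bytes of 'buf' here ... */
		from += retlen;
		len -= retlen;
	}

	kfree(buf);
	return ret;
}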

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We put '-0' suffix to the name to get the same name format as we
	 * used to get. Since this is called only once, we get a unique name.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");