/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
static struct backing_dev_info mtd_bdi_unmappable = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
        .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
                           BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
        .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
                           BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
                           BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
        .name = "mtd",
        .owner = THIS_MODULE,
        .suspend = mtd_cls_suspend,
        .resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
        return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
#else
#define MTD_DEVT(index) 0
#endif

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
        struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
        dev_t index = MTD_DEVT(mtd->index);

        /* remove /dev/mtdXro node if needed */
        if (index)
                device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        if (mtd)
                mtd_resume(mtd);
        return 0;
}

static ssize_t mtd_type_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        char *type;

        switch (mtd->type) {
        case MTD_ABSENT:
                type = "absent";
                break;
        case MTD_RAM:
                type = "ram";
                break;
        case MTD_ROM:
                type = "rom";
                break;
        case MTD_NORFLASH:
                type = "nor";
                break;
        case MTD_NANDFLASH:
                type = "nand";
                break;
        case MTD_DATAFLASH:
                type = "dataflash";
                break;
        case MTD_UBIVOLUME:
                type = "ubi";
                break;
        default:
                type = "unknown";
        }

        return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);

}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%llu\n",
                (unsigned long long)mtd->size);

}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);

}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);

}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

        return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);

}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);

}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        mtd->numeraseregions);

}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
        NULL);

static ssize_t mtd_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);

}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        unsigned int bitflip_threshold;
        int retval;

        retval = kstrtouint(buf, 0, &bitflip_threshold);
        if (retval)
                return retval;

        mtd->bitflip_threshold = bitflip_threshold;
        return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
                   mtd_bitflip_threshold_show,
                   mtd_bitflip_threshold_store);

static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
        &dev_attr_size.attr,
        &dev_attr_erasesize.attr,
        &dev_attr_writesize.attr,
        &dev_attr_subpagesize.attr,
        &dev_attr_oobsize.attr,
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
        &dev_attr_ecc_strength.attr,
        &dev_attr_bitflip_threshold.attr,
        NULL,
};

static struct attribute_group mtd_group = {
        .attrs          = mtd_attrs,
};

static const struct attribute_group *mtd_groups[] = {
        &mtd_group,
        NULL,
};

static struct device_type mtd_devtype = {
        .name           = "mtd",
        .groups         = mtd_groups,
        .release        = mtd_release,
};

/**
 *      add_mtd_device - register an MTD device
 *      @mtd: pointer to new MTD device info structure
 *
 *      Add a device to the list of MTD devices present in the system, and
 *      notify each currently active MTD 'user' of its arrival. Returns
 *      zero on success or 1 on failure, which currently will only happen
 *      if there is insufficient memory or a sysfs error.
 */

int add_mtd_device(struct mtd_info *mtd)
{
        struct mtd_notifier *not;
        int i, error;

        if (!mtd->backing_dev_info) {
                switch (mtd->type) {
                case MTD_RAM:
                        mtd->backing_dev_info = &mtd_bdi_rw_mappable;
                        break;
                case MTD_ROM:
                        mtd->backing_dev_info = &mtd_bdi_ro_mappable;
                        break;
                default:
                        mtd->backing_dev_info = &mtd_bdi_unmappable;
                        break;
                }
        }

        BUG_ON(mtd->writesize == 0);
        mutex_lock(&mtd_table_mutex);

        do {
                if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
                        goto fail_locked;
                error = idr_get_new(&mtd_idr, mtd, &i);
        } while (error == -EAGAIN);

        if (error)
                goto fail_locked;

        mtd->index = i;
        mtd->usecount = 0;

        /* default value if not set by driver */
        if (mtd->bitflip_threshold == 0)
                mtd->bitflip_threshold = mtd->ecc_strength;

        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
                mtd->erasesize_shift = 0;

        if (is_power_of_2(mtd->writesize))
                mtd->writesize_shift = ffs(mtd->writesize) - 1;
        else
                mtd->writesize_shift = 0;

        mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
        mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

        /* Some chips always power up locked. Unlock them now */
        if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
                error = mtd_unlock(mtd, 0, mtd->size);
                if (error && error != -EOPNOTSUPP)
                        printk(KERN_WARNING
                               "%s: unlock failed, writes may not work\n",
                               mtd->name);
        }

        /* Caller should have set dev.parent to match the
         * physical device.
         */
        mtd->dev.type = &mtd_devtype;
        mtd->dev.class = &mtd_class;
        mtd->dev.devt = MTD_DEVT(i);
        dev_set_name(&mtd->dev, "mtd%d", i);
        dev_set_drvdata(&mtd->dev, mtd);
        if (device_register(&mtd->dev) != 0)
                goto fail_added;

        if (MTD_DEVT(i))
                device_create(&mtd_class, mtd->dev.parent,
                              MTD_DEVT(i) + 1,
                              NULL, "mtd%dro", i);

        pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->add(mtd);

        mutex_unlock(&mtd_table_mutex);
        /* We _know_ we aren't being removed, because
           our caller is still holding us here. So none
           of this try_ nonsense, and no bitching about it
           either. :) */
        __module_get(THIS_MODULE);
        return 0;

fail_added:
        idr_remove(&mtd_idr, i);
fail_locked:
        mutex_unlock(&mtd_table_mutex);
        return 1;
}
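
/*
 * Usage sketch (not part of this file): a driver normally fills in a
 * struct mtd_info and registers it through mtd_device_register() or
 * mtd_device_parse_register(), which end up calling add_mtd_device()
 * above.  All "my_*" names and the "pdev" parent device below are
 * hypothetical:
 *
 *      static struct mtd_info my_mtd;
 *
 *      my_mtd.name       = "my-flash";
 *      my_mtd.type       = MTD_NORFLASH;
 *      my_mtd.flags      = MTD_CAP_NORFLASH;
 *      my_mtd.size       = my_flash_size;
 *      my_mtd.erasesize  = my_flash_block_size;
 *      my_mtd.writesize  = 1;
 *      my_mtd.owner      = THIS_MODULE;
 *      my_mtd._erase     = my_flash_erase;
 *      my_mtd._read      = my_flash_read;
 *      my_mtd._write     = my_flash_write;
 *      my_mtd.dev.parent = &pdev->dev;
 *
 *      ret = mtd_device_register(&my_mtd, NULL, 0);
 */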
/**
 *      del_mtd_device - unregister an MTD device
 *      @mtd: pointer to MTD device info structure
 *
 *      Remove a device from the list of MTD devices present in the system,
 *      and notify each currently active MTD 'user' of its departure.
 *      Returns zero on success or 1 on failure, which currently will happen
 *      if the requested device does not appear to be present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
        int ret;
        struct mtd_notifier *not;

        mutex_lock(&mtd_table_mutex);

        if (idr_find(&mtd_idr, mtd->index) != mtd) {
                ret = -ENODEV;
                goto out_error;
        }

        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->remove(mtd);

        if (mtd->usecount) {
                printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
                       mtd->index, mtd->name, mtd->usecount);
                ret = -EBUSY;
        } else {
                device_unregister(&mtd->dev);

                idr_remove(&mtd_idr, mtd->index);

                module_put(THIS_MODULE);
                ret = 0;
        }

out_error:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
                              struct mtd_part_parser_data *parser_data,
                              const struct mtd_partition *parts,
                              int nr_parts)
{
        int err;
        struct mtd_partition *real_parts;

        err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
        if (err <= 0 && nr_parts && parts) {
                real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
                                     GFP_KERNEL);
                if (!real_parts)
                        err = -ENOMEM;
                else
                        err = nr_parts;
        }

        if (err > 0) {
                err = add_mtd_partitions(mtd, real_parts, err);
                kfree(real_parts);
        } else if (err == 0) {
                err = add_mtd_device(mtd);
                if (err == 1)
                        err = -ENODEV;
        }

        return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
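
/*
 * Usage sketch (hypothetical driver, not part of this file): try the command
 * line and device tree partition parsers first and fall back to a static
 * table.  The "my_parts" contents are an assumption:
 *
 *      static const char *part_probes[] = { "cmdlinepart", "ofpart", NULL };
 *      static const struct mtd_partition my_parts[] = {
 *              { .name = "boot", .offset = 0, .size = SZ_1M },
 *              { .name = "fs",   .offset = MTDPART_OFS_APPEND,
 *                .size = MTDPART_SIZ_FULL },
 *      };
 *
 *      ret = mtd_device_parse_register(mtd, part_probes, NULL,
 *                                      my_parts, ARRAY_SIZE(my_parts));
 */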
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
        int err;

        err = del_mtd_partitions(master);
        if (err)
                return err;

        if (!device_is_registered(&master->dev))
                return 0;

        return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *      register_mtd_user - register a 'user' of MTD devices.
 *      @new: pointer to notifier info structure
 *
 *      Registers a pair of callback functions to be called upon addition
 *      or removal of MTD devices. Causes the 'add' callback to be immediately
 *      invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        list_add(&new->list, &mtd_notifiers);

        __module_get(THIS_MODULE);

        mtd_for_each_device(mtd)
                new->add(mtd);

        mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 *      unregister_mtd_user - unregister a 'user' of MTD devices.
 *      @old: pointer to notifier info structure
 *
 *      Removes a callback function pair from the list of 'users' to be
 *      notified upon addition or removal of MTD devices. Causes the
 *      'remove' callback to be immediately invoked for each MTD device
 *      currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        module_put(THIS_MODULE);

        mtd_for_each_device(mtd)
                old->remove(mtd);

        list_del(&old->list);
        mutex_unlock(&mtd_table_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
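
/*
 * Usage sketch (hypothetical MTD user, not part of this file): a subsystem
 * that wants to learn about every MTD device registers a struct mtd_notifier;
 * note that the add() callback also runs immediately for devices that are
 * already present:
 *
 *      static void my_mtd_add(struct mtd_info *mtd)
 *      {
 *              pr_info("mtd%d (%s) arrived\n", mtd->index, mtd->name);
 *      }
 *
 *      static void my_mtd_remove(struct mtd_info *mtd)
 *      {
 *              pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
 *      }
 *
 *      static struct mtd_notifier my_notifier = {
 *              .add    = my_mtd_add,
 *              .remove = my_mtd_remove,
 *      };
 *
 *      register_mtd_user(&my_notifier);
 *      ...
 *      unregister_mtd_user(&my_notifier);
 */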
/**
 *      get_mtd_device - obtain a validated handle for an MTD device
 *      @mtd: last known address of the required MTD device
 *      @num: internal device number of the required MTD device
 *
 *      Given a number and NULL address, return the num'th entry in the device
 *      table, if any.  Given an address and num == -1, search the device table
 *      for a device with that address and return if it's still present. Given
 *      both, return the num'th driver only if its address matches. Return
 *      error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
        struct mtd_info *ret = NULL, *other;
        int err = -ENODEV;

        mutex_lock(&mtd_table_mutex);

        if (num == -1) {
                mtd_for_each_device(other) {
                        if (other == mtd) {
                                ret = mtd;
                                break;
                        }
                }
        } else if (num >= 0) {
                ret = idr_find(&mtd_idr, num);
                if (mtd && mtd != ret)
                        ret = NULL;
        }

        if (!ret) {
                ret = ERR_PTR(err);
                goto out;
        }

        err = __get_mtd_device(ret);
        if (err)
                ret = ERR_PTR(err);
out:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
        int err;

        if (!try_module_get(mtd->owner))
                return -ENODEV;

        if (mtd->_get_device) {
                err = mtd->_get_device(mtd);

                if (err) {
                        module_put(mtd->owner);
                        return err;
                }
        }
        mtd->usecount++;
        return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 *      get_mtd_device_nm - obtain a validated handle for an MTD device by
 *      device name
 *      @name: MTD device name to open
 *
 *      This function returns the MTD device description structure in case of
 *      success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
        int err = -ENODEV;
        struct mtd_info *mtd = NULL, *other;

        mutex_lock(&mtd_table_mutex);

        mtd_for_each_device(other) {
                if (!strcmp(name, other->name)) {
                        mtd = other;
                        break;
                }
        }

        if (!mtd)
                goto out_unlock;

        err = __get_mtd_device(mtd);
        if (err)
                goto out_unlock;

        mutex_unlock(&mtd_table_mutex);
        return mtd;

out_unlock:
        mutex_unlock(&mtd_table_mutex);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
        mutex_lock(&mtd_table_mutex);
        __put_mtd_device(mtd);
        mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
        --mtd->usecount;
        BUG_ON(mtd->usecount < 0);

        if (mtd->_put_device)
                mtd->_put_device(mtd);

        module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
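
/*
 * Usage sketch (not part of this file): an MTD user takes a reference on a
 * device before doing I/O and drops it when finished.  Lookup can be done by
 * name or by number; "my-flash" is a hypothetical name and error handling is
 * abbreviated:
 *
 *      struct mtd_info *mtd = get_mtd_device_nm("my-flash");
 *
 *      if (IS_ERR(mtd))
 *              return PTR_ERR(mtd);
 *      ... read/write/erase through the mtd_* helpers ...
 *      put_mtd_device(mtd);
 *
 * get_mtd_device(NULL, 0) would instead look up device number 0.
 */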
/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
        if (!instr->len) {
                instr->state = MTD_ERASE_DONE;
                mtd_erase_callback(instr);
                return 0;
        }
        return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
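
/*
 * Usage sketch (not part of this file): because erase is asynchronous, the
 * usual calling pattern supplies a callback that wakes the caller once
 * instr->state reaches MTD_ERASE_DONE or MTD_ERASE_FAILED.  The "my_*"
 * helpers below are hypothetical:
 *
 *      static void my_erase_callback(struct erase_info *instr)
 *      {
 *              wake_up((wait_queue_head_t *)instr->priv);
 *      }
 *
 *      static int my_erase_block(struct mtd_info *mtd, loff_t ofs)
 *      {
 *              wait_queue_head_t wq;
 *              struct erase_info instr = {
 *                      .mtd      = mtd,
 *                      .addr     = ofs,
 *                      .len      = mtd->erasesize,
 *                      .callback = my_erase_callback,
 *                      .priv     = (u_long)&wq,
 *              };
 *              int ret;
 *
 *              init_waitqueue_head(&wq);
 *              ret = mtd_erase(mtd, &instr);
 *              if (ret)
 *                      return ret;
 *              wait_event(wq, instr.state == MTD_ERASE_DONE ||
 *                             instr.state == MTD_ERASE_FAILED);
 *              return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
 *      }
 */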
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
              void **virt, resource_size_t *phys)
{
        *retlen = 0;
        *virt = NULL;
        if (phys)
                *phys = 0;
        if (!mtd->_point)
                return -EOPNOTSUPP;
        if (from < 0 || from > mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;
        return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
        if (!mtd->_point)
                return -EOPNOTSUPP;
        if (from < 0 || from > mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;
        return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
                                    unsigned long offset, unsigned long flags)
{
        if (!mtd->_get_unmapped_area)
                return -EOPNOTSUPP;
        if (offset > mtd->size || len > mtd->size - offset)
                return -EINVAL;
        return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
             u_char *buf)
{
        int ret_code;
        *retlen = 0;
        if (from < 0 || from > mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;

        /*
         * In the absence of an error, drivers return a non-negative integer
         * representing the maximum number of bitflips that were corrected on
         * any one ecc region (if applicable; zero otherwise).
         */
        ret_code = mtd->_read(mtd, from, len, retlen, buf);
        if (unlikely(ret_code < 0))
                return ret_code;
        if (mtd->ecc_strength == 0)
                return 0;       /* device lacks ecc */
        return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
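
/*
 * Usage sketch (not part of this file): -EUCLEAN from mtd_read() means the
 * data in "buf" is good but the corrected bitflips reached
 * mtd->bitflip_threshold, so the caller should consider scrubbing the block;
 * other negative values are real errors.  my_schedule_scrub() is a
 * hypothetical helper:
 *
 *      size_t retlen;
 *      int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *      if (err == -EUCLEAN)
 *              my_schedule_scrub(mtd, from);
 *      else if (err < 0)
 *              return err;
 */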
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
              const u_char *buf)
{
        *retlen = 0;
        if (to < 0 || to > mtd->size || len > mtd->size - to)
                return -EINVAL;
        if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (!len)
                return 0;
        return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it is
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                    const u_char *buf)
{
        *retlen = 0;
        if (!mtd->_panic_write)
                return -EOPNOTSUPP;
        if (to < 0 || to > mtd->size || len > mtd->size - to)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (!len)
                return 0;
        return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
        int ret_code;
        ops->retlen = ops->oobretlen = 0;
        if (!mtd->_read_oob)
                return -EOPNOTSUPP;
        /*
         * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
         * similar to mtd->_read(), returning a non-negative integer
         * representing max bitflips. In other cases, mtd->_read_oob() may
         * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
         */
        ret_code = mtd->_read_oob(mtd, from, ops);
        if (unlikely(ret_code < 0))
                return ret_code;
        if (mtd->ecc_strength == 0)
                return 0;       /* device lacks ecc */
        return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                           size_t len)
{
        if (!mtd->_get_fact_prot_info)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_get_fact_prot_info(mtd, buf, len);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        *retlen = 0;
        if (!mtd->_read_fact_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
                           size_t len)
{
        if (!mtd->_get_user_prot_info)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_get_user_prot_info(mtd, buf, len);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        *retlen = 0;
        if (!mtd->_read_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
                            size_t *retlen, u_char *buf)
{
        *retlen = 0;
        if (!mtd->_write_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
        if (!mtd->_lock_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        if (!mtd->_lock)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;
        return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        if (!mtd->_unlock)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;
        return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        if (!mtd->_is_locked)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;
        return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        if (!mtd->_block_isbad)
                return 0;
        if (ofs < 0 || ofs > mtd->size)
                return -EINVAL;
        return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        if (!mtd->_block_markbad)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs > mtd->size)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
                              unsigned long count, loff_t to, size_t *retlen)
{
        unsigned long i;
        size_t totlen = 0, thislen;
        int ret = 0;

        for (i = 0; i < count; i++) {
                if (!vecs[i].iov_len)
                        continue;
                ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
                                vecs[i].iov_base);
                totlen += thislen;
                if (ret || thislen != vecs[i].iov_len)
                        break;
                to += vecs[i].iov_len;
        }
        *retlen = totlen;
        return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
               unsigned long count, loff_t to, size_t *retlen)
{
        *retlen = 0;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (!mtd->_writev)
                return default_mtd_writev(mtd, vecs, count, to, retlen);
        return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
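
/*
 * Usage sketch (not part of this file): gathering two separate buffers into a
 * single write with mtd_writev(); devices without a ->_writev() method fall
 * back to default_mtd_writev() above.  "hdr" and "data" are hypothetical
 * buffers:
 *
 *      struct kvec vecs[2] = {
 *              { .iov_base = hdr,  .iov_len = hdr_len  },
 *              { .iov_base = data, .iov_len = data_len },
 *      };
 *      size_t retlen;
 *      int err = mtd_writev(mtd, vecs, 2, to, &retlen);
 */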
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
        gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
                       __GFP_NORETRY | __GFP_NO_KSWAPD;
        size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
        void *kbuf;

        *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

        while (*size > min_alloc) {
                kbuf = kmalloc(*size, flags);
                if (kbuf)
                        return kbuf;

                *size >>= 1;
                *size = ALIGN(*size, mtd->writesize);
        }

        /*
         * For the last resort allocation allow 'kmalloc()' to do all sorts of
         * things (write-back, dropping caches, etc) by using GFP_KERNEL.
         */
        return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
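
/*
 * Usage sketch (not part of this file): ask for a whole-eraseblock buffer but
 * accept whatever smaller (writesize-aligned) size the allocator can provide:
 *
 *      size_t size = mtd->erasesize;
 *      void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ... process the range in chunks of at most "size" bytes ...
 *      kfree(buf);
 */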
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static struct proc_dir_entry *proc_mtd;

static int mtd_proc_show(struct seq_file *m, void *v)
{
        struct mtd_info *mtd;

        seq_puts(m, "dev:    size   erasesize  name\n");
        mutex_lock(&mtd_table_mutex);
        mtd_for_each_device(mtd) {
                seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
                           mtd->index, (unsigned long long)mtd->size,
                           mtd->erasesize, mtd->name);
        }
        mutex_unlock(&mtd_table_mutex);
        return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
        .open           = mtd_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
        int ret;

        ret = bdi_init(bdi);
        if (!ret)
                ret = bdi_register(bdi, NULL, name);

        if (ret)
                bdi_destroy(bdi);

        return ret;
}

static int __init init_mtd(void)
{
        int ret;

        ret = class_register(&mtd_class);
        if (ret)
                goto err_reg;

        ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
        if (ret)
                goto err_bdi1;

        ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
        if (ret)
                goto err_bdi2;

        ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
        if (ret)
                goto err_bdi3;

#ifdef CONFIG_PROC_FS
        proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
#endif /* CONFIG_PROC_FS */
        return 0;

err_bdi3:
        bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
        bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
        class_unregister(&mtd_class);
err_reg:
        pr_err("Error registering mtd class or bdi: %d\n", ret);
        return ret;
}

static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
#endif /* CONFIG_PROC_FS */
        class_unregister(&mtd_class);
        bdi_destroy(&mtd_bdi_unmappable);
        bdi_destroy(&mtd_bdi_ro_mappable);
        bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");