/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object embedded in an mtd_part structure,
 * retrieve a pointer to the containing mtd_part with this macro.  This works
 * because 'mtd' is the first member of struct mtd_part, so the cast is
 * equivalent to container_of().
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
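
/*
 * Reads are passed straight through to the master device after rebasing the
 * address by the partition offset.  The master's ECC statistics are sampled
 * before the read so that only the bitflips/failures caused by this
 * particular read are credited to the partition's own ecc_stats.
 */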
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = mtd_read(part->master, from + part->offset, len, retlen, buf);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
	}
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return mtd_point(part->master, from + part->offset, len, retlen,
			 virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return mtd_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return mtd_get_unmapped_area(part->master, len, offset, flags);
}
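
/*
 * OOB reads need an explicit range check at this level: 'from' and ops->len
 * are partition-relative, and when an OOB buffer is supplied the request must
 * also fit within the OOB space of the pages remaining in this partition,
 * i.e. ops->ooboffs + ops->ooblen may not exceed (remaining pages) * (OOB
 * bytes available per page).
 */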
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = mtd_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_write(part->master, to + part->offset, len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_panic_write(part->master, to + part->offset, len, retlen,
			       buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return mtd_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return mtd_writev(part->master, vecs, count, to + part->offset,
			  retlen);
}
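
/*
 * Erase requests arrive with partition-relative addresses.  part_erase()
 * rebases instr->addr onto the master device before issuing the erase and,
 * together with mtd_erase_callback(), translates addr/fail_addr back so that
 * callers only ever see partition-relative offsets.
 */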
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = mtd_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return mtd_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	mtd_sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return mtd_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	mtd_resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return mtd_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = mtd_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}
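
/*
 * Partition tables handed to allocate_partition() (via add_mtd_partitions()
 * or a partition parser) may use the special MTDPART_OFS_xxx offsets and
 * MTDPART_SIZ_FULL, which are resolved below.  A purely illustrative table
 * (the names and sizes are hypothetical):
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = 512 * 1024 },
 *		{ .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = 4 * 1024 * 1024 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 * MTDPART_OFS_APPEND starts a partition right after the previous one,
 * MTDPART_OFS_NXTBLK additionally rounds that start up to the next erase
 * block, and MTDPART_SIZ_FULL extends the partition to the end of the master
 * device, exactly as computed in allocate_partition().
 */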

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR "mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop overshot the first region of the partition; step back to it */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
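
/*
 * A minimal sketch of run-time partition manipulation, for illustration only
 * ("master", the name, the 1MiB length and "partno" are hypothetical, and
 * error handling is omitted):
 *
 *	err = mtd_add_partition(master, "scratch", 0, 1024 * 1024);
 *	...
 *	err = mtd_del_partition(master, partno);
 *
 * mtd_add_partition() rejects the magic MTDPART_OFS_APPEND/NXTBLK offsets and
 * any range overlapping an existing partition of the same master;
 * mtd_del_partition() takes the MTD index of the slave to remove.
 */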
int mtd_add_partition(struct mtd_info *master, char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* only an explicit offset is accepted here */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
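
/*
 * Partition parsers hook into the list above.  A skeletal, purely
 * illustrative registration ("myfmt" and my_parse_fn are hypothetical); the
 * parse function is expected to allocate and fill an array of mtd_partition
 * and return the number of entries it found, zero if it found none, or a
 * negative error code:
 *
 *	static int my_parse_fn(struct mtd_info *master,
 *			       struct mtd_partition **pparts,
 *			       struct mtd_part_parser_data *data)
 *	{
 *		return 0;
 *	}
 *
 *	static struct mtd_part_parser my_parser = {
 *		.owner    = THIS_MODULE,
 *		.parse_fn = my_parse_fn,
 *		.name     = "myfmt",
 *	};
 *
 *	register_mtd_parser(&my_parser);
 */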

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char *default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}

int mtd_is_partition(struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
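
/*
 * For illustration only: a rough sketch of how the helpers above fit together
 * when an MTD driver registers a device.  "master" and "probe_types" are
 * hypothetical and error handling is omitted; drivers typically reach this
 * code through mtd_device_parse_register() rather than calling these
 * functions directly.
 *
 *	static const char *probe_types[] = { "cmdlinepart", NULL };
 *	struct mtd_partition *parts;
 *	int nr;
 *
 *	nr = parse_mtd_partitions(master, probe_types, &parts, NULL);
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 *	else
 *		add_mtd_device(master);
 */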