/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the parent MTD device
 * @list: list item in the global mtd_partitions list
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the embedded MTD object, retrieve the mtd_part
 * structure that contains it.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}
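/*
 * Illustrative sketch (the helper name example_op is made up, not part
 * of this driver): MTD users only ever see the embedded mtd_info, and
 * every pass-through method below starts by recovering the wrapper:
 *
 *	static int example_op(struct mtd_info *mtd)
 *	{
 *		struct mtd_part *part = mtd_to_part(mtd);
 *
 *		// part->parent and part->offset are now available
 *		return 0;
 *	}
 *
 * This happens to be a no-op cast here because @mtd is the first member
 * of struct mtd_part, but container_of() stays correct regardless of
 * member position.
 */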
/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}
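/*
 * Worked example for the ECC accounting in part_read() and
 * part_read_oob() above (the numbers are hypothetical): if the parent's
 * ecc_stats.corrected is 5 before _read() and 7 afterwards, the read
 * credits 7 - 5 = 2 corrected bitflips to this partition only, leaving
 * the counters of sibling partitions untouched. The snapshot taken in
 * 'stats' is what makes the update a per-call delta rather than a copy
 * of the parent's cumulative totals.
 */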
static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		instr->fail_addr -= part->offset;
	instr->addr -= part->offset;

	return ret;
}
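/*
 * Address translation sketch for part_erase() above (hypothetical
 * numbers): for a partition at offset 0x100000, an erase requested at
 * partition offset 0x20000 is issued at 0x120000 on the parent. On
 * failure, fail_addr is shifted back by the same amount before the
 * call returns, so callers always see partition-relative addresses in
 * both directions.
 */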
static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_unlock(part->parent, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/**
 * mtd_parse_part - parse MTD partition looking for subpartitions
 *
 * @slave: part that is supposed to be a container and should be parsed
 * @types: NULL-terminated array with names of partition parsers to try
 *
 * Some partitions act as containers with extra subpartitions (volumes),
 * and such containers come in various formats. This function tries the
 * specified parsers on the given partition and, on success, registers
 * the subpartitions they find.
 */
static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
{
	struct mtd_partitions parsed;
	int err;

	err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
	if (err)
		return err;
	else if (!parsed.nr_parts)
		return -ENOENT;

	err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);

	mtd_part_parser_cleanup(&parsed);

	return err;
}
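/*
 * Illustrative sketch (the parser name "example_vol" and the layout are
 * made up): a partition table entry becomes a container by listing
 * parser names in its 'types' field, which add_mtd_partitions() hands
 * to mtd_parse_part() above:
 *
 *	static const char * const fw_types[] = { "example_vol", NULL };
 *	static const struct mtd_partition board_parts[] = {
 *		{
 *			.name	= "firmware",
 *			.offset	= 0,
 *			.size	= SZ_8M,
 *			.types	= fw_types,
 *		},
 *	};
 *
 * Whatever subpartitions the named parser reports are then registered
 * as children of the "firmware" partition.
 */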
static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	if (parent->_read)
		slave->mtd._read = part_read;
	if (parent->_write)
		slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}
	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;

	tmp = slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
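/*
 * Worked example for the MTDPART_OFS_NXTBLK rounding in
 * allocate_partition() above (hypothetical numbers): with
 * wr_alignment = 0x20000 (128 KiB erase blocks) and
 * cur_offset = 0x21000, do_div() leaves a remainder of 0x1000, so the
 * partition is moved forward by 0x20000 - 0x1000 = 0x1f000 and starts
 * at 0x40000, the next erase block boundary.
 */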
static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}
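/*
 * The attribute registered above appears in sysfs, so userspace can
 * query a partition's offset; for example (mtd1 and the value are
 * hypothetical):
 *
 *	$ cat /sys/class/mtd/mtd1/offset
 *	1048576
 */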
int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
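/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a driver that discovers a region at runtime could carve it
 * out with the exported helpers above and remove it again later:
 *
 *	err = mtd_add_partition(master, "calibration", SZ_1M, SZ_64K);
 *	...
 *	err = mtd_del_partition(master, partno);
 *
 * where 'master' is the parent mtd_info and 'partno' is the index the
 * new partition was assigned when it was registered.
 */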
/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);
		if (parts[i].types)
			mtd_parse_part(slave, parts[i].types);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expect the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}
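/*
 * Minimal parser sketch (illustrative; the name "example_part" and the
 * fixed single-partition layout are made up). A parse_fn allocates a
 * partition table, stores it through the pparts pointer and returns
 * the number of entries; with no .cleanup set, the default cleanup
 * above kfree()s the table in one chunk:
 *
 *	static int example_parse(struct mtd_info *master,
 *				 const struct mtd_partition **pparts,
 *				 struct mtd_part_parser_data *data)
 *	{
 *		struct mtd_partition *parts;
 *
 *		parts = kzalloc(sizeof(*parts), GFP_KERNEL);
 *		if (!parts)
 *			return -ENOMEM;
 *		parts[0].name = "example";
 *		parts[0].offset = 0;
 *		parts[0].size = MTDPART_SIZ_FULL;
 *		*pparts = parts;
 *		return 1;
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "example_part",
 *		.parse_fn = example_parse,
 *	};
 *
 * Such a parser would be registered from module init with
 * register_mtd_parser(&example_parser) and removed on exit with
 * deregister_mtd_parser().
 */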
/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: info about partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers at the moment.
 * Note: If there is more than one parser in @types, the kernel only takes
 * the partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero otherwise, and @pparts will describe the partitions, number of
 *   partitions, and the parser which parsed them. Caller must release
 *   resources with mtd_part_parser_cleanup() when finished with the returned
 *   data.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partitions *pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; *types; types++) {
		pr_debug("%s: parsing partitions %s\n", master->name, *types);
		parser = mtd_part_parser_get(*types);
		if (!parser && !request_module("%s", *types))
			parser = mtd_part_parser_get(*types);
		pr_debug("%s: got parser %s\n", master->name,
			 parser ? parser->name : NULL);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, data);
		/* Found partitions! */
		if (ret > 0)
			return 0;
		mtd_part_parser_put(parser);
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);