/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the *flash device*
 * @list: node in the global list of MTD partitions
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		len = mtd_oobavail(mtd, ops);
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = mtd_to_part(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_unlock(part->parent, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/**
 * mtd_parse_part - parse MTD partition looking for subpartitions
 *
 * @slave: part that is supposed to be a container and should be parsed
 * @types: NULL-terminated array with names of partition parsers to try
 *
 * Some partitions are kind of containers with extra subpartitions (volumes).
 * There can be various formats of such containers. This function tries to use
 * the specified parsers to analyze the given partition and registers the
 * subpartitions it finds on success.
 */
static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
{
	struct mtd_partitions parsed;
	int err;

	err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
	if (err)
		return err;
	else if (!parsed.nr_parts)
		return -ENOENT;

	err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);

	mtd_part_parser_cleanup(&parsed);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}
	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;

	tmp = slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
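
/*
 * Illustrative sketch only, not part of this driver: how a caller might carve
 * a runtime partition out of an existing MTD device with mtd_add_partition()
 * and remove it again with mtd_del_partition() (defined below). The device
 * name "some-flash", the offsets and "some_partno" are made-up examples.
 *
 *	struct mtd_info *master = get_mtd_device_nm("some-flash");
 *
 *	if (!IS_ERR(master)) {
 *		// 1 MiB partition starting 4 MiB into the device
 *		mtd_add_partition(master, "scratch", 4 * SZ_1M, SZ_1M);
 *
 *		// ... later, delete it again using the partition's mtd index
 *		mtd_del_partition(master, some_partno);
 *		put_mtd_device(master);
 *	}
 */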
/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);
		if (parts[i].types)
			mtd_parse_part(slave, parts[i].types);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
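
/*
 * Illustrative sketch only, not part of this driver: the kind of partition
 * table that ends up in add_mtd_partitions(). A flash driver typically
 * describes its layout with a static array like the one below (the names and
 * sizes are made up) and hands it to mtd_device_register(), which eventually
 * calls add_mtd_partitions(). Note how MTDPART_OFS_NXTBLK, MTDPART_OFS_APPEND
 * and MTDPART_SIZ_FULL are resolved by allocate_partition() above.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{
 *			.name       = "bootloader",
 *			.offset     = 0,
 *			.size       = SZ_256K,
 *			.mask_flags = MTD_WRITEABLE,	// force read-only
 *		}, {
 *			.name       = "kernel",
 *			.offset     = MTDPART_OFS_NXTBLK, // align to erase block
 *			.size       = SZ_4M,
 *		}, {
 *			.name       = "rootfs",
 *			.offset     = MTDPART_OFS_APPEND, // right after "kernel"
 *			.size       = MTDPART_SIZ_FULL,	  // rest of the device
 *		},
 *	};
 *
 *	mtd_device_register(master, example_parts, ARRAY_SIZE(example_parts));
 */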

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: info about partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on the MTD device @master. It uses
 * the MTD partition parsers specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero otherwise, and @pparts will describe the partitions, number of
 *   partitions, and the parser which parsed them. Caller must release
 *   resources with mtd_part_parser_cleanup() when finished with the returned
 *   data.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partitions *pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; *types; types++) {
		pr_debug("%s: parsing partitions %s\n", master->name, *types);
		parser = mtd_part_parser_get(*types);
		if (!parser && !request_module("%s", *types))
			parser = mtd_part_parser_get(*types);
		pr_debug("%s: got parser %s\n", master->name,
			 parser ? parser->name : NULL);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, data);
		/* Found partitions! */
		if (ret > 0)
			return 0;
		mtd_part_parser_put(parser);
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
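
/*
 * Illustrative sketch only, not part of this driver: a minimal custom
 * partition parser as it would be registered through __register_mtd_parser()
 * above (usually via the register_mtd_parser() helper macro). The parser name
 * and the single hard-coded partition are made-up examples; a real parser
 * would derive the layout from on-flash data. Because no .cleanup callback is
 * set, the core's default kfree()-based cleanup is used.
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    const struct mtd_partition **pparts,
 *				    struct mtd_part_parser_data *data)
 *	{
 *		struct mtd_partition *parts;
 *
 *		parts = kzalloc(sizeof(*parts), GFP_KERNEL);
 *		if (!parts)
 *			return -ENOMEM;
 *
 *		parts[0].name = "example-data";
 *		parts[0].offset = 0;
 *		parts[0].size = MTDPART_SIZ_FULL;
 *
 *		*pparts = parts;
 *		return 1;	// number of partitions found
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "example",
 *		.parse_fn = example_parse_fn,
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */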