1 /* 2 * Simple MTD partitioning layer 3 * 4 * (C) 2000 Nicolas Pitre <nico@cam.org> 5 * 6 * This code is GPL 7 * 8 * $Id: mtdpart.c,v 1.53 2005/02/08 17:11:13 nico Exp $ 9 * 10 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de> 11 * added support for read_oob, write_oob 12 */ 13 14 #include <linux/module.h> 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/slab.h> 18 #include <linux/list.h> 19 #include <linux/config.h> 20 #include <linux/kmod.h> 21 #include <linux/mtd/mtd.h> 22 #include <linux/mtd/partitions.h> 23 #include <linux/mtd/compatmac.h> 24 25 /* Our partition linked list */ 26 static LIST_HEAD(mtd_partitions); 27 28 /* Our partition node structure */ 29 struct mtd_part { 30 struct mtd_info mtd; 31 struct mtd_info *master; 32 u_int32_t offset; 33 int index; 34 struct list_head list; 35 int registered; 36 }; 37 38 /* 39 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve 40 * the pointer to that structure with this macro. 41 */ 42 #define PART(x) ((struct mtd_part *)(x)) 43 44 45 /* 46 * MTD methods which simply translate the effective address and pass through 47 * to the _real_ device. 48 */ 49 50 static int part_read (struct mtd_info *mtd, loff_t from, size_t len, 51 size_t *retlen, u_char *buf) 52 { 53 struct mtd_part *part = PART(mtd); 54 if (from >= mtd->size) 55 len = 0; 56 else if (from + len > mtd->size) 57 len = mtd->size - from; 58 if (part->master->read_ecc == NULL) 59 return part->master->read (part->master, from + part->offset, 60 len, retlen, buf); 61 else 62 return part->master->read_ecc (part->master, from + part->offset, 63 len, retlen, buf, NULL, &mtd->oobinfo); 64 } 65 66 static int part_point (struct mtd_info *mtd, loff_t from, size_t len, 67 size_t *retlen, u_char **buf) 68 { 69 struct mtd_part *part = PART(mtd); 70 if (from >= mtd->size) 71 len = 0; 72 else if (from + len > mtd->size) 73 len = mtd->size - from; 74 return part->master->point (part->master, from + part->offset, 75 len, retlen, buf); 76 } 77 static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 78 { 79 struct mtd_part *part = PART(mtd); 80 81 part->master->unpoint (part->master, addr, from + part->offset, len); 82 } 83 84 85 static int part_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, 86 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel) 87 { 88 struct mtd_part *part = PART(mtd); 89 if (oobsel == NULL) 90 oobsel = &mtd->oobinfo; 91 if (from >= mtd->size) 92 len = 0; 93 else if (from + len > mtd->size) 94 len = mtd->size - from; 95 return part->master->read_ecc (part->master, from + part->offset, 96 len, retlen, buf, eccbuf, oobsel); 97 } 98 99 static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len, 100 size_t *retlen, u_char *buf) 101 { 102 struct mtd_part *part = PART(mtd); 103 if (from >= mtd->size) 104 len = 0; 105 else if (from + len > mtd->size) 106 len = mtd->size - from; 107 return part->master->read_oob (part->master, from + part->offset, 108 len, retlen, buf); 109 } 110 111 static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 112 size_t *retlen, u_char *buf) 113 { 114 struct mtd_part *part = PART(mtd); 115 return part->master->read_user_prot_reg (part->master, from, 116 len, retlen, buf); 117 } 118 119 static int part_get_user_prot_info (struct mtd_info *mtd, 120 struct otp_info *buf, size_t len) 121 { 122 struct mtd_part *part = PART(mtd); 123 return part->master->get_user_prot_info (part->master, buf, len); 124 } 125 126 static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 127 size_t *retlen, u_char *buf) 128 { 129 struct mtd_part *part = PART(mtd); 130 return part->master->read_fact_prot_reg (part->master, from, 131 len, retlen, buf); 132 } 133 134 static int part_get_fact_prot_info (struct mtd_info *mtd, 135 struct otp_info *buf, size_t len) 136 { 137 struct mtd_part *part = PART(mtd); 138 return part->master->get_fact_prot_info (part->master, buf, len); 139 } 140 141 static int part_write (struct mtd_info *mtd, loff_t to, size_t len, 142 size_t *retlen, const u_char *buf) 143 { 144 struct mtd_part *part = PART(mtd); 145 if (!(mtd->flags & MTD_WRITEABLE)) 146 return -EROFS; 147 if (to >= mtd->size) 148 len = 0; 149 else if (to + len > mtd->size) 150 len = mtd->size - to; 151 if (part->master->write_ecc == NULL) 152 return part->master->write (part->master, to + part->offset, 153 len, retlen, buf); 154 else 155 return part->master->write_ecc (part->master, to + part->offset, 156 len, retlen, buf, NULL, &mtd->oobinfo); 157 158 } 159 160 static int part_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, 161 size_t *retlen, const u_char *buf, 162 u_char *eccbuf, struct nand_oobinfo *oobsel) 163 { 164 struct mtd_part *part = PART(mtd); 165 if (!(mtd->flags & MTD_WRITEABLE)) 166 return -EROFS; 167 if (oobsel == NULL) 168 oobsel = &mtd->oobinfo; 169 if (to >= mtd->size) 170 len = 0; 171 else if (to + len > mtd->size) 172 len = mtd->size - to; 173 return part->master->write_ecc (part->master, to + part->offset, 174 len, retlen, buf, eccbuf, oobsel); 175 } 176 177 static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len, 178 size_t *retlen, const u_char *buf) 179 { 180 struct mtd_part *part = PART(mtd); 181 if (!(mtd->flags & MTD_WRITEABLE)) 182 return -EROFS; 183 if (to >= mtd->size) 184 len = 0; 185 else if (to + len > mtd->size) 186 len = mtd->size - to; 187 return part->master->write_oob (part->master, to + part->offset, 188 len, retlen, buf); 189 } 190 191 static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 192 size_t *retlen, u_char *buf) 193 { 194 struct mtd_part *part = PART(mtd); 195 return part->master->write_user_prot_reg (part->master, from, 196 len, retlen, buf); 197 } 198 199 static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len) 200 { 201 struct mtd_part *part = PART(mtd); 202 return part->master->lock_user_prot_reg (part->master, from, len); 203 } 204 205 static int part_writev (struct mtd_info *mtd, const struct kvec *vecs, 206 unsigned long count, loff_t to, size_t *retlen) 207 { 208 struct mtd_part *part = PART(mtd); 209 if (!(mtd->flags & MTD_WRITEABLE)) 210 return -EROFS; 211 if (part->master->writev_ecc == NULL) 212 return part->master->writev (part->master, vecs, count, 213 to + part->offset, retlen); 214 else 215 return part->master->writev_ecc (part->master, vecs, count, 216 to + part->offset, retlen, 217 NULL, &mtd->oobinfo); 218 } 219 220 static int part_readv (struct mtd_info *mtd, struct kvec *vecs, 221 unsigned long count, loff_t from, size_t *retlen) 222 { 223 struct mtd_part *part = PART(mtd); 224 if (part->master->readv_ecc == NULL) 225 return part->master->readv (part->master, vecs, count, 226 from + part->offset, retlen); 227 else 228 return part->master->readv_ecc (part->master, vecs, count, 229 from + part->offset, retlen, 230 NULL, &mtd->oobinfo); 231 } 232 233 static int part_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, 234 unsigned long count, loff_t to, size_t *retlen, 235 u_char *eccbuf, struct nand_oobinfo *oobsel) 236 { 237 struct mtd_part *part = PART(mtd); 238 if (!(mtd->flags & MTD_WRITEABLE)) 239 return -EROFS; 240 if (oobsel == NULL) 241 oobsel = &mtd->oobinfo; 242 return part->master->writev_ecc (part->master, vecs, count, 243 to + part->offset, retlen, 244 eccbuf, oobsel); 245 } 246 247 static int part_readv_ecc (struct mtd_info *mtd, struct kvec *vecs, 248 unsigned long count, loff_t from, size_t *retlen, 249 u_char *eccbuf, struct nand_oobinfo *oobsel) 250 { 251 struct mtd_part *part = PART(mtd); 252 if (oobsel == NULL) 253 oobsel = &mtd->oobinfo; 254 return part->master->readv_ecc (part->master, vecs, count, 255 from + part->offset, retlen, 256 eccbuf, oobsel); 257 } 258 259 static int part_erase (struct mtd_info *mtd, struct erase_info *instr) 260 { 261 struct mtd_part *part = PART(mtd); 262 int ret; 263 if (!(mtd->flags & MTD_WRITEABLE)) 264 return -EROFS; 265 if (instr->addr >= mtd->size) 266 return -EINVAL; 267 instr->addr += part->offset; 268 ret = part->master->erase(part->master, instr); 269 return ret; 270 } 271 272 void mtd_erase_callback(struct erase_info *instr) 273 { 274 if (instr->mtd->erase == part_erase) { 275 struct mtd_part *part = PART(instr->mtd); 276 277 if (instr->fail_addr != 0xffffffff) 278 instr->fail_addr -= part->offset; 279 instr->addr -= part->offset; 280 } 281 if (instr->callback) 282 instr->callback(instr); 283 } 284 EXPORT_SYMBOL_GPL(mtd_erase_callback); 285 286 static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len) 287 { 288 struct mtd_part *part = PART(mtd); 289 if ((len + ofs) > mtd->size) 290 return -EINVAL; 291 return part->master->lock(part->master, ofs + part->offset, len); 292 } 293 294 static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len) 295 { 296 struct mtd_part *part = PART(mtd); 297 if ((len + ofs) > mtd->size) 298 return -EINVAL; 299 return part->master->unlock(part->master, ofs + part->offset, len); 300 } 301 302 static void part_sync(struct mtd_info *mtd) 303 { 304 struct mtd_part *part = PART(mtd); 305 part->master->sync(part->master); 306 } 307 308 static int part_suspend(struct mtd_info *mtd) 309 { 310 struct mtd_part *part = PART(mtd); 311 return part->master->suspend(part->master); 312 } 313 314 static void part_resume(struct mtd_info *mtd) 315 { 316 struct mtd_part *part = PART(mtd); 317 part->master->resume(part->master); 318 } 319 320 static int part_block_isbad (struct mtd_info *mtd, loff_t ofs) 321 { 322 struct mtd_part *part = PART(mtd); 323 if (ofs >= mtd->size) 324 return -EINVAL; 325 ofs += part->offset; 326 return part->master->block_isbad(part->master, ofs); 327 } 328 329 static int part_block_markbad (struct mtd_info *mtd, loff_t ofs) 330 { 331 struct mtd_part *part = PART(mtd); 332 if (!(mtd->flags & MTD_WRITEABLE)) 333 return -EROFS; 334 if (ofs >= mtd->size) 335 return -EINVAL; 336 ofs += part->offset; 337 return part->master->block_markbad(part->master, ofs); 338 } 339 340 /* 341 * This function unregisters and destroy all slave MTD objects which are 342 * attached to the given master MTD object. 343 */ 344 345 int del_mtd_partitions(struct mtd_info *master) 346 { 347 struct list_head *node; 348 struct mtd_part *slave; 349 350 for (node = mtd_partitions.next; 351 node != &mtd_partitions; 352 node = node->next) { 353 slave = list_entry(node, struct mtd_part, list); 354 if (slave->master == master) { 355 struct list_head *prev = node->prev; 356 __list_del(prev, node->next); 357 if(slave->registered) 358 del_mtd_device(&slave->mtd); 359 kfree(slave); 360 node = prev; 361 } 362 } 363 364 return 0; 365 } 366 367 /* 368 * This function, given a master MTD object and a partition table, creates 369 * and registers slave MTD objects which are bound to the master according to 370 * the partition definitions. 371 * (Q: should we register the master MTD object as well?) 372 */ 373 374 int add_mtd_partitions(struct mtd_info *master, 375 const struct mtd_partition *parts, 376 int nbparts) 377 { 378 struct mtd_part *slave; 379 u_int32_t cur_offset = 0; 380 int i; 381 382 printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 383 384 for (i = 0; i < nbparts; i++) { 385 386 /* allocate the partition structure */ 387 slave = kmalloc (sizeof(*slave), GFP_KERNEL); 388 if (!slave) { 389 printk ("memory allocation error while creating partitions for \"%s\"\n", 390 master->name); 391 del_mtd_partitions(master); 392 return -ENOMEM; 393 } 394 memset(slave, 0, sizeof(*slave)); 395 list_add(&slave->list, &mtd_partitions); 396 397 /* set up the MTD object for this partition */ 398 slave->mtd.type = master->type; 399 slave->mtd.flags = master->flags & ~parts[i].mask_flags; 400 slave->mtd.size = parts[i].size; 401 slave->mtd.oobblock = master->oobblock; 402 slave->mtd.oobsize = master->oobsize; 403 slave->mtd.ecctype = master->ecctype; 404 slave->mtd.eccsize = master->eccsize; 405 406 slave->mtd.name = parts[i].name; 407 slave->mtd.bank_size = master->bank_size; 408 slave->mtd.owner = master->owner; 409 410 slave->mtd.read = part_read; 411 slave->mtd.write = part_write; 412 413 if(master->point && master->unpoint){ 414 slave->mtd.point = part_point; 415 slave->mtd.unpoint = part_unpoint; 416 } 417 418 if (master->read_ecc) 419 slave->mtd.read_ecc = part_read_ecc; 420 if (master->write_ecc) 421 slave->mtd.write_ecc = part_write_ecc; 422 if (master->read_oob) 423 slave->mtd.read_oob = part_read_oob; 424 if (master->write_oob) 425 slave->mtd.write_oob = part_write_oob; 426 if(master->read_user_prot_reg) 427 slave->mtd.read_user_prot_reg = part_read_user_prot_reg; 428 if(master->read_fact_prot_reg) 429 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; 430 if(master->write_user_prot_reg) 431 slave->mtd.write_user_prot_reg = part_write_user_prot_reg; 432 if(master->lock_user_prot_reg) 433 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; 434 if(master->get_user_prot_info) 435 slave->mtd.get_user_prot_info = part_get_user_prot_info; 436 if(master->get_fact_prot_info) 437 slave->mtd.get_fact_prot_info = part_get_fact_prot_info; 438 if (master->sync) 439 slave->mtd.sync = part_sync; 440 if (!i && master->suspend && master->resume) { 441 slave->mtd.suspend = part_suspend; 442 slave->mtd.resume = part_resume; 443 } 444 if (master->writev) 445 slave->mtd.writev = part_writev; 446 if (master->readv) 447 slave->mtd.readv = part_readv; 448 if (master->writev_ecc) 449 slave->mtd.writev_ecc = part_writev_ecc; 450 if (master->readv_ecc) 451 slave->mtd.readv_ecc = part_readv_ecc; 452 if (master->lock) 453 slave->mtd.lock = part_lock; 454 if (master->unlock) 455 slave->mtd.unlock = part_unlock; 456 if (master->block_isbad) 457 slave->mtd.block_isbad = part_block_isbad; 458 if (master->block_markbad) 459 slave->mtd.block_markbad = part_block_markbad; 460 slave->mtd.erase = part_erase; 461 slave->master = master; 462 slave->offset = parts[i].offset; 463 slave->index = i; 464 465 if (slave->offset == MTDPART_OFS_APPEND) 466 slave->offset = cur_offset; 467 if (slave->offset == MTDPART_OFS_NXTBLK) { 468 u_int32_t emask = master->erasesize-1; 469 slave->offset = (cur_offset + emask) & ~emask; 470 if (slave->offset != cur_offset) { 471 printk(KERN_NOTICE "Moving partition %d: " 472 "0x%08x -> 0x%08x\n", i, 473 cur_offset, slave->offset); 474 } 475 } 476 if (slave->mtd.size == MTDPART_SIZ_FULL) 477 slave->mtd.size = master->size - slave->offset; 478 cur_offset = slave->offset + slave->mtd.size; 479 480 printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, 481 slave->offset + slave->mtd.size, slave->mtd.name); 482 483 /* let's do some sanity checks */ 484 if (slave->offset >= master->size) { 485 /* let's register it anyway to preserve ordering */ 486 slave->offset = 0; 487 slave->mtd.size = 0; 488 printk ("mtd: partition \"%s\" is out of reach -- disabled\n", 489 parts[i].name); 490 } 491 if (slave->offset + slave->mtd.size > master->size) { 492 slave->mtd.size = master->size - slave->offset; 493 printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n", 494 parts[i].name, master->name, slave->mtd.size); 495 } 496 if (master->numeraseregions>1) { 497 /* Deal with variable erase size stuff */ 498 int i; 499 struct mtd_erase_region_info *regions = master->eraseregions; 500 501 /* Find the first erase regions which is part of this partition. */ 502 for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++) 503 ; 504 505 for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) { 506 if (slave->mtd.erasesize < regions[i].erasesize) { 507 slave->mtd.erasesize = regions[i].erasesize; 508 } 509 } 510 } else { 511 /* Single erase size */ 512 slave->mtd.erasesize = master->erasesize; 513 } 514 515 if ((slave->mtd.flags & MTD_WRITEABLE) && 516 (slave->offset % slave->mtd.erasesize)) { 517 /* Doesn't start on a boundary of major erase size */ 518 /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */ 519 slave->mtd.flags &= ~MTD_WRITEABLE; 520 printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", 521 parts[i].name); 522 } 523 if ((slave->mtd.flags & MTD_WRITEABLE) && 524 (slave->mtd.size % slave->mtd.erasesize)) { 525 slave->mtd.flags &= ~MTD_WRITEABLE; 526 printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", 527 parts[i].name); 528 } 529 530 /* copy oobinfo from master */ 531 memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo)); 532 533 if(parts[i].mtdp) 534 { /* store the object pointer (caller may or may not register it */ 535 *parts[i].mtdp = &slave->mtd; 536 slave->registered = 0; 537 } 538 else 539 { 540 /* register our partition */ 541 add_mtd_device(&slave->mtd); 542 slave->registered = 1; 543 } 544 } 545 546 return 0; 547 } 548 549 EXPORT_SYMBOL(add_mtd_partitions); 550 EXPORT_SYMBOL(del_mtd_partitions); 551 552 static DEFINE_SPINLOCK(part_parser_lock); 553 static LIST_HEAD(part_parsers); 554 555 static struct mtd_part_parser *get_partition_parser(const char *name) 556 { 557 struct list_head *this; 558 void *ret = NULL; 559 spin_lock(&part_parser_lock); 560 561 list_for_each(this, &part_parsers) { 562 struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list); 563 564 if (!strcmp(p->name, name) && try_module_get(p->owner)) { 565 ret = p; 566 break; 567 } 568 } 569 spin_unlock(&part_parser_lock); 570 571 return ret; 572 } 573 574 int register_mtd_parser(struct mtd_part_parser *p) 575 { 576 spin_lock(&part_parser_lock); 577 list_add(&p->list, &part_parsers); 578 spin_unlock(&part_parser_lock); 579 580 return 0; 581 } 582 583 int deregister_mtd_parser(struct mtd_part_parser *p) 584 { 585 spin_lock(&part_parser_lock); 586 list_del(&p->list); 587 spin_unlock(&part_parser_lock); 588 return 0; 589 } 590 591 int parse_mtd_partitions(struct mtd_info *master, const char **types, 592 struct mtd_partition **pparts, unsigned long origin) 593 { 594 struct mtd_part_parser *parser; 595 int ret = 0; 596 597 for ( ; ret <= 0 && *types; types++) { 598 parser = get_partition_parser(*types); 599 #ifdef CONFIG_KMOD 600 if (!parser && !request_module("%s", *types)) 601 parser = get_partition_parser(*types); 602 #endif 603 if (!parser) { 604 printk(KERN_NOTICE "%s partition parsing not available\n", 605 *types); 606 continue; 607 } 608 ret = (*parser->parse_fn)(master, pparts, origin); 609 if (ret > 0) { 610 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", 611 ret, parser->name, master->name); 612 } 613 put_partition_parser(parser); 614 } 615 return ret; 616 } 617 618 EXPORT_SYMBOL_GPL(parse_mtd_partitions); 619 EXPORT_SYMBOL_GPL(register_mtd_parser); 620 EXPORT_SYMBOL_GPL(deregister_mtd_parser); 621 622 MODULE_LICENSE("GPL"); 623 MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>"); 624 MODULE_DESCRIPTION("Generic support for partitioning of MTD devices"); 625 626