/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 *
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
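 *
 * Worked example (illustrative sizes, not from the original comment):
 * with two 16 MiB subdevices, an 8 KiB read at offset 0x00FFF000 of the
 * concatenated device is split by the loop in concat_read() into a 4 KiB
 * read from subdev 0 at 0x00FFF000 and a 4 KiB read from subdev 1 at
 * offset 0; writes are split the same way.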
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		/* accumulate data and OOB counts separately, as in concat_read_oob() */
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
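	 *
	 * For instance (illustrative numbers, not from the original
	 * comment): on a super device whose first erase region has 64 KiB
	 * blocks starting at offset 0 and whose second region has 128 KiB
	 * blocks starting at 32 MiB, an erase of 256 KiB at addr 32 MiB
	 * passes both checks below, while an erase of 64 KiB at the same
	 * address is rejected because addr + len is not aligned to the
	 * 128 KiB erase size of the region it ends in.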
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, or NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
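 *
 * Illustrative usage sketch (not code from this driver; mtd_a, mtd_b and
 * the "flash-concat" name are made-up placeholders for subdevices the
 * caller already owns, e.g. found by a map or board driver):
 *
 *	struct mtd_info *parts[2] = { mtd_a, mtd_b };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, ARRAY_SIZE(parts), "flash-concat");
 *	if (combined)
 *		mtd_device_register(combined, NULL, 0);
 *
 * and later, on teardown:
 *
 *	mtd_device_unregister(combined);
 *	mtd_concat_destroy(combined);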
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
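	 *
	 * Illustrative example (assumed sizes, not from the original
	 * comment): concatenating a 32 MiB chip with uniform 64 KiB
	 * erase blocks and a 32 MiB chip with uniform 128 KiB erase
	 * blocks yields two erase regions,
	 *   { .offset = 0,         .erasesize = 0x10000, .numblocks = 512 }
	 *   { .offset = 0x2000000, .erasesize = 0x20000, .numblocks = 256 }
	 * and the super device's erasesize is set to the maximum, 128 KiB.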
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");