// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;

		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++)
			;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i)
			;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		err = mtd_erase(subdev, erase);
		if (err) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	kfree(erase);

	return err;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		rc = mtd_suspend(subdev);
		if (rc < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 * An illustrative usage sketch appears at the end of this file.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;

			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc_array(num_erase_region,
				  sizeof(struct mtd_erase_region_info),
				  GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase region list for device \"%s\"\n",
			       name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;

				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);

	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");
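
/*
 * Usage sketch (illustrative only, not part of this driver): a board or
 * map driver that has probed several chips might combine them roughly
 * like this. The subdevice array, device name and error handling below
 * are hypothetical.
 *
 *	static struct mtd_info *subdevs[2];	// filled in by chip probing
 *	static struct mtd_info *merged;
 *	int err;
 *
 *	merged = mtd_concat_create(subdevs, ARRAY_SIZE(subdevs), "merged-flash");
 *	if (!merged)
 *		return -ENXIO;
 *	err = mtd_device_register(merged, NULL, 0);
 *
 *	// ... and on teardown:
 *	mtd_device_unregister(merged);
 *	mtd_concat_destroy(merged);
 */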