// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2009 - Maxim Levitsky
 * SmartMedia/xD translation layer
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sysfs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include "nand/raw/sm_common.h"
#include "sm_ftl.h"


static struct workqueue_struct *cache_flush_workqueue;

static int cache_timeout = 1000;
module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
	"Timeout (in ms) for cache flush (1000 ms default)");

static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");


/* ------------------- sysfs attributes ---------------------------------- */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;
	int len;
};

static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);

	return sysfs_emit(buf, "%.*s", sm_attr->len, sm_attr->data);
}


#define NUM_ATTRIBUTES 1
#define SM_CIS_VENDOR_OFFSET 0x59
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;


	/* Create array of pointers to the attributes */
	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
			     GFP_KERNEL);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}

static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i]; i++) {

		struct device_attribute *dev_attr = container_of(attributes[i],
						struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				     struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}


/* ----------------------- oob helpers -------------------------------------- */

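/*
 * OOB logical address format, as used by sm_get_lba()/sm_write_lba() below:
 * the 10-bit LBA is stored twice in the OOB (lba_copy1/lba_copy2). In each
 * copy, byte 0 is 0001 0 A9 A8 A7 and byte 1 is A6..A0 P, where P is chosen
 * so that the 16-bit field has even parity. The two copies are allowed to
 * differ by at most one bit (see sm_read_lba()).
 */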
static int sm_get_lba(uint8_t *lba)
{
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* check parity - endianness doesn't matter */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
}


/*
 * Read LBA associated with block
 * returns -1 if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it */
	lba = sm_get_lba(oob->lba_copy1);

	if (lba == -2)
		lba = sm_get_lba(oob->lba_copy2);

	return lba;
}

static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t tmp[2];

	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	if (hweight16(*(uint16_t *)tmp) & 0x01)
		tmp[1] |= 1;

	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
}


/* Make offset from parts */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}

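/*
 * Note on addressing: sm_mkoffset() builds a *physical* media offset, so
 * zones are spaced SM_MAX_ZONE_SIZE physical blocks apart. sm_break_offset()
 * below decomposes a *logical* byte offset, where each zone exposes only
 * max_lba logical blocks; the remaining zone_size - max_lba blocks are spares
 * used to replace bad or worn-out blocks.
 */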
/* Breaks offset into parts */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			    int *zone, int *block, int *boffset)
{
	u64 offset = loffset;

	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}

/* ---------------------- low level IO ------------------------------------- */

static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t ecc[3];

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;

	buffer += SM_SMALL_PAGE;

	ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order);
	if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
				   sm_order) < 0)
		return -EIO;
	return 0;
}

/* Reads a sector + oob */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops = { };
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with 0xFF */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media()
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal...
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad", block, zone);
		goto again;
	}

	/* Test ECC */
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}

/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops = { };
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}

/* ------------------------ block IO ------------------------------------- */

/* Write a block using data and lba, and invalid sector bitmap */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
	     boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				  " couldn't be read, marking it as invalid",
				  boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
						 SM_SMALL_PAGE, oob.ecc1,
						 sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
						 SM_SMALL_PAGE, oob.ecc2,
						 sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails, try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair blocks that are marked
			 * as erased, but aren't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}


/* Mark the whole block as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks until they fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}

/*
 * Erase a block within a zone
 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(ftl->readonly);

	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd_erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			  block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			 (const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}

/* Thoroughly test that block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
	     boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}

/* ----------------- media scanning --------------------------------- */

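/*
 * Default CHS geometry table, keyed by medium size:
 * { size in MiB, cylinders, heads, sectors }
 */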
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};


static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};

/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages) */
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages) */
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have the same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write, erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0; i < ARRAY_SIZE(chs_table); i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}

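/*
 * The CIS (Card Information Structure) lives near the start of zone 0, in
 * the first valid sector of the first valid block. Besides validating the
 * medium, re-reading it (see sm_recheck_media()) doubles as a health check
 * for card removal or unstable media.
 */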
/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;

	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
		return -EIO;

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
		return -EIO;

	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
			cis_signature, sizeof(cis_signature))) {
		return 0;
	}

	return -EIO;
}

/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	for (block = 0; block < ftl->zone_size - ftl->max_lba; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0; boffset < ftl->block_size;
	     boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}

/* Basic test to determine if the underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	if (sm_read_cis(ftl)) {

		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}

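/*
 * Zone initialization (done lazily, on first access through sm_get_zone()):
 * each block's first-sector OOB is scanned to build the LBA -> physical
 * block table, erased blocks are queued in the free_sectors FIFO, duplicate
 * LBA mappings are resolved, and the FIFO is rotated by a random amount so
 * that writes don't always start at the same free block.
 */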
/* Initialize a FTL zone */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0; block < ftl->zone_size; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				 (unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector */
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			  " of LBA %d between blocks %d and %d in zone %d",
			  lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid */
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
				   zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It is not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the latter");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors means that the zone is heavily damaged, write won't
	 * work, but it can still be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
				(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}

/* Get and automatically initialize an FTL mapping for one zone */
static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone;
	int error;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

		if (error)
			return ERR_PTR(error);
	}
	return zone;
}


/* ----------------- cache handling ------------------------------------------*/

/* Initialize the one block cache */
static void sm_cache_init(struct sm_ftl *ftl)
{
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
}

/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
}

/* Read a sector from the cache */
static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{
	if (test_bit(boffset / SM_SECTOR_SIZE,
		     &ftl->cache_data_invalid_bitmap))
		return -1;

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
	return 0;
}

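/*
 * Flushing the one-block write cache is copy-on-write: any sector the upper
 * layer did not overwrite is first read back from the currently mapped
 * physical block, then the merged block is written to a block taken from the
 * free_sectors FIFO, the LBA mapping is updated, and the old block is erased
 * and returned to the FIFO.
 */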
/* Write the cache to hardware */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block */
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
			 ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				  &ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it isn't worth the trouble,
	 * and the dangers
	 */
	if (kfifo_out(&zone->free_sectors,
		      (unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
			   ftl->cache_block, ftl->cache_data_invalid_bitmap))
		goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}


/* flush timer, runs cache_timeout ms after the last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = timer_container_of(ftl, t, timer);

	queue_work(cache_flush_workqueue, &ftl->flush_work);
}

/* cache flush work, kicked by timer */
static void sm_cache_flush_work(struct work_struct *work)
{
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);

	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}

/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if it doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}

/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
		    unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need for the flush timer to run now */
	timer_delete(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}

/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int retval;

	mutex_lock(&ftl->mutex);
	retval = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
	return retval;
}

/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	timer_delete_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}

/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct sm_ftl *ftl = dev->priv;

	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;
	geo->cylinders = ftl->cylinders;
	return 0;
}

/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *trans;
	struct sm_ftl *ftl;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
	if (!ftl)
		goto error1;


	mutex_init(&ftl->mutex);
	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");
		goto error2;
	}


	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)
		goto error2;

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
			     GFP_KERNEL);
	if (!ftl->zones)
		goto error3;

	/* Allocate the cache */
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)
		goto error4;

	sm_cache_init(ftl);


	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
	if (!trans)
		goto error5;

	ftl->trans = trans;
	trans->priv = ftl;

	trans->tr = tr;
	trans->mtd = mtd;
	trans->devnum = -1;
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");
		goto error6;
	}

	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	if (!ftl->disk_attributes)
		goto error6;
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		  (int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("FTL layout:");
	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",
		ftl->block_size);


	/* Register device */
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");
		goto error6;
	}
	return;
error6:
	kfree(trans);
error5:
	kfree(ftl->cache_data);
error4:
	kfree(ftl->zones);
error3:
	kfree(ftl->cis_buffer);
error2:
	kfree(ftl);
error1:
	return;
}

/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;
	int i;

	del_mtd_blktrans_dev(dev);
	ftl->trans = NULL;

	for (i = 0; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)
			continue;

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);
	}

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);
	kfree(ftl->zones);
	kfree(ftl->cache_data);
	kfree(ftl);
}

static struct mtd_blktrans_ops sm_ftl_ops = {
	.name = "smblk",
	.major = 0,
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	.readsect = sm_read,
	.writesect = sm_write,

	.flush = sm_flush,
	.release = sm_release,

	.owner = THIS_MODULE,
};

static __init int sm_module_init(void)
{
	int error = 0;

	cache_flush_workqueue = create_freezable_workqueue("smflush");
	if (!cache_flush_workqueue)
		return -ENOMEM;

	error = register_mtd_blktrans(&sm_ftl_ops);
	if (error)
		destroy_workqueue(cache_flush_workqueue);
	return error;
}

static void __exit sm_module_exit(void)
{
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}

module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");