/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005 Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}
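
/*
 * On-flash layout of an erase unit, as interpreted by build_block_map()
 * above and scan_header() below (all values little-endian u16):
 *
 *	word 0:			RFD_MAGIC
 *	word 1:			0xffff or 0xffc8, purpose unknown
 *	word 2:			always 0xffff, ignored
 *	word HEADER_MAP_OFFSET + i:	map entry for data sector i, holding the
 *		logical sector number, or SECTOR_FREE (never written),
 *		SECTOR_DELETED (obsoleted by a later write), or SECTOR_ZERO
 *		(stands in for logical sector 0, which would otherwise be
 *		indistinguishable from SECTOR_DELETED).
 *
 * The header is padded out to header_sectors_per_block sectors; the data
 * sectors follow immediately after it.
 */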
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map\n", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}
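
/*
 * Erase one erase unit and stamp a fresh RFD_MAGIC header on it.  On failure
 * the unit is left in BLOCK_FAILED state with no usable sectors; on success
 * it becomes BLOCK_OK with every data sector free and its erase count bumped.
 */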
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);
		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else {
			part->blocks[block].state = BLOCK_OK;
		}
	}

	kfree(erase);

	return rc;
}

static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}
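
/*
 * Pick a victim erase unit and reclaim it.  Reclaiming is postponed while any
 * non-reserved unit still has free sectors.  Otherwise the unit with the
 * lowest score (used sectors plus erase count, minus one if it holds the
 * sector being replaced) wins: it becomes the new reserved unit and its live
 * sectors, if any, are migrated via move_block_contents(), or it is simply
 * erased.
 */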
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming if there is a free sector as
		 * more removed sectors is more efficient (have to move
		 * less).
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}
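
/*
 * Find a SECTOR_FREE slot in the cached header of the current block.  The
 * search starts at the first slot expected to be free (sectors are normally
 * filled in order) and wraps around the whole map, returning -1 if no free
 * slot exists.
 */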
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}

static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}
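
/*
 * Probe callback for the blktrans layer: only NOR flash devices no larger
 * than UINT_MAX bytes are considered.  The erase unit size comes from the
 * block_size module parameter, falling back to the device's erasesize, and
 * a device with on-flash inconsistencies is registered read-only.
 */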
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}
out:
	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			 part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");
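
/*
 * Typical usage sketch (block device naming assumed to follow the usual
 * blktrans <name><letter><partition> pattern, e.g. /dev/rfda1; the
 * filesystem type depends on the RFD image):
 *
 *	modprobe rfd_ftl
 *	mount /dev/rfda1 /mnt
 *
 * block_size=<bytes> can be passed to the module to override the erase unit
 * size when the RFD image does not use the device's native erase size.
 */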