/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright © 2005  Sean Young <sean@mess.org>
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/types.h>

static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

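/*
 * On-flash layout of each erase unit, as interpreted by this driver
 * (summarised from the constants and comments above, not taken from an
 * RFD specification):
 *
 *   u16 magic;     - must equal RFD_MAGIC
 *   u16 unknown;   - 0xffff or 0xffc8, not interpreted here
 *   u16 reserved;  - always 0xffff, ignored
 *   u16 map[];     - one entry per data sector: the logical sector number
 *                    stored there, SECTOR_FREE, SECTOR_DELETED, or
 *                    SECTOR_ZERO (which stands in for logical sector 0)
 *
 * The header occupies header_sectors_per_block sectors; the rest of the
 * erase unit holds the 512-byte data sectors that the map describes.
 */
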
struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_UNUSED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

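/*
 * Parse the header that scan_header() just read into part->header_cache
 * for erase unit block_no: validate the magic, count free and used
 * sectors, and record where each logical sector lives in
 * part->sector_map.
 */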
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}

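/*
 * Scan every erase unit on the MTD device: derive the disk geometry
 * from the erase-unit size, allocate the header cache, block array and
 * sector map, and build the logical-to-physical map from each unit's
 * on-flash header.
 */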
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map\n", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			      part->header_size, &retlen,
			      (u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

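/*
 * Block-layer read: look up the physical flash address of the logical
 * sector and read it; unmapped sectors read back as zeroes.
 */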
static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      (u_char *)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

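/*
 * Completion callback for erase_block(): on success, mark the unit as
 * erased, reset its sector counts and write a fresh RFD_MAGIC header;
 * on failure, take the unit out of service.
 */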
static void erase_callback(struct erase_info *erase)
{
	struct partition *part;
	u16 magic;
	int i, rc;
	size_t retlen;

	part = (struct partition*)erase->priv;

	i = (u32)erase->addr / part->block_size;
	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
	    erase->addr > UINT_MAX) {
		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
		return;
	}

	if (erase->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
				"state %d\n", (unsigned long long)erase->addr,
				part->mbd.mtd->name, erase->state);

		part->blocks[i].state = BLOCK_FAILED;
		part->blocks[i].free_sectors = 0;
		part->blocks[i].used_sectors = 0;

		kfree(erase);

		return;
	}

	magic = cpu_to_le16(RFD_MAGIC);

	part->blocks[i].state = BLOCK_ERASED;
	part->blocks[i].free_sectors = part->data_sectors_per_block;
	part->blocks[i].used_sectors = 0;
	part->blocks[i].erases++;

	rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
		       &retlen, (u_char *)&magic);

	if (!rc && retlen != sizeof(magic))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to write RFD "
				"header at 0x%lx\n",
				part->mbd.mtd->name,
				part->blocks[i].offset);
		part->blocks[i].state = BLOCK_FAILED;
	} else
		part->blocks[i].state = BLOCK_OK;

	kfree(erase);
}

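/*
 * Kick off an asynchronous erase of one erase unit; erase_callback()
 * finishes the bookkeeping and rewrites the unit header when the MTD
 * layer reports completion.
 */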
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc = -ENOMEM;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		goto err;

	erase->mtd = part->mbd.mtd;
	erase->callback = erase_callback;
	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;
	erase->priv = (u_long)part;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);

	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		kfree(erase);
	}

err:
	return rc;
}

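/*
 * Relocate every live sector out of the erase unit chosen for reclaim
 * by rewriting it through rfd_ftl_writesect(); the sector being
 * replaced (*old_sector) is simply dropped rather than copied.
 */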
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		      part->header_size, &retlen, (u_char *)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			      sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

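/*
 * Garbage collection: pick the cheapest erase unit to reclaim (fewest
 * live sectors plus fewest erases), move its live data elsewhere and
 * erase it so it becomes the new reserved unit.
 */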
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* INT_MAX */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while any block still has a free
		 * sector: the longer we wait, the more sectors can be
		 * deleted first, so less live data has to be moved.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}

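/*
 * Find an erase unit with at least one free sector to write into,
 * starting the search at the current block (or a jiffies-based random
 * unit when there is none, which spreads wear). Unformatted units seen
 * along the way are scheduled for erase.
 */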
/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (part->blocks[block].state == BLOCK_UNUSED)
			erase_block(part, block);

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

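/*
 * Select the erase unit that new writes go to, reclaiming space first
 * if nothing is free, and load its header into part->header_cache.
 */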
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		      part->header_size, &retlen,
		      (u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

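/*
 * Mark the map entry for an obsolete physical sector as SECTOR_DELETED
 * on flash (and in the cached header if it belongs to the current
 * block); erase the unit once it holds neither used nor free sectors.
 */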
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

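/*
 * Scan the cached header of the current block for a map slot that is
 * still SECTOR_FREE, starting at the first slot expected to be free.
 */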
static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}

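/*
 * Write one sector's payload into a free slot of the current block,
 * then record the new location both on flash and in the in-memory
 * sector map. *old_addr identifies the sector's previous copy so a
 * reclaim triggered from here can account for it.
 */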
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

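/*
 * Block-layer write: all-zero sectors are stored implicitly by leaving
 * them unmapped; anything else is written out-of-place via
 * do_writesect(), after which the old copy (if any) is marked deleted.
 */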
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

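/* Report the synthetic geometry (1 head, 63 sectors/track, cylinders from scan_header()). */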
static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

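/*
 * Probe callback from the blktrans layer: accept NOR devices whose size
 * fits in 32 bits, scan them for RFD headers and, if any are found,
 * register the partition as a block device (read-only when errors were
 * detected during the scan).
 */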
static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
		return;

	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_WARNING PREFIX "please provide block_size\n");
			goto out;
		} else
			part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_WARNING PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}
out:
	kfree(part);
}

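/*
 * Teardown: log per-unit erase counts for diagnostics, unregister the
 * block device and release the partition's lookup tables.
 */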
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");