/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright (C) 2005  Sean Young <sean@mess.org>
 *
 * $Id: rfd_ftl.c,v 1.5 2005/11/07 11:14:21 gleixner Exp $
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 *
 * based on ftl.c
 */

#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include <asm/types.h>

#define const_cpu_to_le16	__constant_cpu_to_le16

static int block_size;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
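
/*
 * For example (illustrative value only, it must match the device's erase
 * unit layout), the block size can be overridden at module load time:
 *
 *	modprobe rfd_ftl block_size=65536
 */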

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR		256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS		4

/* An erase unit should start with this value */
#define RFD_MAGIC		0x9193

/* the second value is 0xffff or 0xffc8; function unknown */

/* the third value is always 0xffff, ignored */

/* next is an array of mappings, one entry per data sector */
#define HEADER_MAP_OFFSET	3
#define SECTOR_DELETED		0x0000
#define SECTOR_ZERO		0xfffe
#define SECTOR_FREE		0xffff
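
/*
 * Illustrative sketch of an erase-unit header (a picture only; the code
 * below accesses the header as a flat array of little-endian 16-bit words):
 *
 *	struct rfd_unit_header {
 *		__le16 magic;		RFD_MAGIC
 *		__le16 unknown;		0xffff or 0xffc8, function unknown
 *		__le16 reserved;	always 0xffff, ignored
 *		__le16 map[];		one entry per data sector, holding
 *					SECTOR_FREE, SECTOR_DELETED, a logical
 *					sector number, or SECTOR_ZERO (which
 *					stands for logical sector 0)
 *	};
 *
 * The data sectors follow the header sectors within the same erase unit.
 */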

#define SECTOR_SIZE		512

#define SECTORS_PER_TRACK	63

struct block {
	enum {
		BLOCK_OK,
		BLOCK_ERASING,
		BLOCK_ERASED,
		BLOCK_FAILED
	} state;
	int free_sectors;
	int used_sectors;
	int erases;
	u_long offset;
};

struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;
	int cylinders;
	int errors;
	u_long *sector_map;
	struct block *blocks;
};

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);

static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_ERASED; /* assumption */
		block->free_sectors = part->data_sectors_per_block;
		part->reserved_block = block_no;
		return 1;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_NOTICE PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		if (part->sector_map[entry] != -1) {
			printk(KERN_NOTICE PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}
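
/*
 * Worked example (assuming a 64 KiB erase unit, i.e. 128 sectors per unit
 * with one of them used as the header sector): map entry i = 5 in unit #3
 * holding logical sector 42 gives
 *
 *	sector_map[42] = 3 * 0x10000 + (5 + 1) * 512 = 0x30c00
 *
 * so that logical sector is read straight from that flash offset.
 */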

static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = part->mbd.mtd->size / part->block_size;

	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has a three-word (u16) header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;
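
	/*
	 * Example, assuming a 1 MiB device with 64 KiB erase units:
	 * sectors_per_block = 128, header_sectors_per_block = 1 and
	 * data_sectors_per_block = 127; with one unit kept in reserve for
	 * reclaim, cylinders = (127 * 15 - 1) / 63 = 30, so
	 * sector_count = 30 * 63 = 1890.
	 */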

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map\n", part->mbd.mtd->name);
		goto err;
	}

	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = part->mbd.mtd->read(part->mbd.mtd,
				i * part->block_size, part->header_size,
				&retlen, (u_char*)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	if (part->reserved_block == -1) {
		printk(KERN_NOTICE PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}

static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long addr;
	size_t retlen;
	int rc;

	if (sector >= part->sector_count)
		return -EIO;

	addr = part->sector_map[sector];
	if (addr != -1) {
		rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
						&retlen, (u_char*)buf);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_WARNING PREFIX "error reading '%s' at "
				"0x%lx\n", part->mbd.mtd->name, addr);
			return rc;
		}
	} else
		memset(buf, 0, SECTOR_SIZE);

	return 0;
}

static void erase_callback(struct erase_info *erase)
{
	struct partition *part;
	u16 magic;
	int i, rc;
	size_t retlen;

	part = (struct partition*)erase->priv;

	i = erase->addr / part->block_size;
	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
		printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
				"on '%s'\n", erase->addr, part->mbd.mtd->name);
		return;
	}

	if (erase->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
				"state %d\n", erase->addr,
				part->mbd.mtd->name, erase->state);

		part->blocks[i].state = BLOCK_FAILED;
		part->blocks[i].free_sectors = 0;
		part->blocks[i].used_sectors = 0;

		kfree(erase);

		return;
	}

	magic = const_cpu_to_le16(RFD_MAGIC);

	part->blocks[i].state = BLOCK_ERASED;
	part->blocks[i].free_sectors = part->data_sectors_per_block;
	part->blocks[i].used_sectors = 0;
	part->blocks[i].erases++;

	rc = part->mbd.mtd->write(part->mbd.mtd,
		part->blocks[i].offset, sizeof(magic), &retlen,
		(u_char*)&magic);

	if (!rc && retlen != sizeof(magic))
		rc = -EIO;

	if (rc) {
		printk(KERN_NOTICE PREFIX "'%s': unable to write RFD "
				"header at 0x%lx\n",
				part->mbd.mtd->name,
				part->blocks[i].offset);
		part->blocks[i].state = BLOCK_FAILED;
	} else
		part->blocks[i].state = BLOCK_OK;

	kfree(erase);
}

static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc = -ENOMEM;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		goto err;

	erase->mtd = part->mbd.mtd;
	erase->callback = erase_callback;
	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;
	erase->priv = (u_long)part;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);

	if (rc) {
		printk(KERN_WARNING PREFIX "erase of region %x,%x on '%s' "
				"failed\n", erase->addr, erase->len,
				part->mbd.mtd->name);
		kfree(erase);
	}

err:
	return rc;
}

static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = part->mbd.mtd->read(part->mbd.mtd,
		part->blocks[block_no].offset, part->header_size,
		&retlen, (u_char*)map);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_NOTICE PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);

		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			*old_sector = -1;
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = part->mbd.mtd->read(part->mbd.mtd, addr,
			SECTOR_SIZE, &retlen, sector_data);

		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_NOTICE PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);

			goto err;
		}

		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
				entry, sector_data);

		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}

static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	if (part->mbd.mtd->sync)
		part->mbd.mtd->sync(part->mbd.mtd);

	score = 0x7fffffff; /* INT_MAX */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming while any block still has a free
		 * sector: the more sectors that have been deleted by the
		 * time we reclaim, the less data has to be moved.
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "and %d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}
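
/*
 * Scoring example for reclaim_block(): a unit with 10 used sectors that has
 * been erased twice scores 12; the same unit scores 11 if it also holds the
 * sector being replaced, since that sector will not need to be copied.
 * The lowest-scoring unit is reclaimed.
 */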

/*
 * IMPROVE: It would be best to choose the block with the most deleted sectors,
 * because if we fill that one up first it'll have the most chance of having
 * the least live sectors at reclaim.
 */
static int find_free_block(const struct partition *part)
{
	int block, stop;

	block = part->current_block == -1 ?
			jiffies % part->total_blocks : part->current_block;
	stop = block;

	do {
		if (part->blocks[block].free_sectors &&
				block != part->reserved_block)
			return block;

		if (++block >= part->total_blocks)
			block = 0;

	} while (block != stop);

	return -1;
}

static int find_writeable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
		part->header_size, &retlen, (u_char*)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_NOTICE PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}

static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = const_cpu_to_le16(SECTOR_DELETED);

	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
		sizeof(del), &retlen, (u_char*)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_WARNING PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}

	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}

static int find_free_sector(const struct partition *part, const struct block *block)
{
	int i, stop;

	i = stop = part->data_sectors_per_block - block->free_sectors;

	do {
		if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
				== SECTOR_FREE)
			return i;

		if (++i == part->data_sectors_per_block)
			i = 0;
	} while (i != stop);

	return -1;
}

static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writeable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = part->mbd.mtd->write(part->mbd.mtd,
		addr, SECTOR_SIZE, &retlen, (u_char*)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = part->mbd.mtd->write(part->mbd.mtd, addr,
			sizeof(entry), &retlen, (u_char*)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}

static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

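	/*
	 * An all-zeroes sector is never stored: if no non-zero byte is found
	 * in the loop below, the logical sector is simply left unmapped and
	 * rfd_ftl_readsect() will return zeroes for it.
	 */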
	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}

static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
	struct partition *part = (struct partition*)dev;

	geo->heads = 1;
	geo->sectors = SECTORS_PER_TRACK;
	geo->cylinders = part->cylinders;

	return 0;
}

static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct partition *part;

	if (mtd->type != MTD_NORFLASH)
		return;

	part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
	if (!part)
		return;

	part->mbd.mtd = mtd;

	if (block_size)
		part->block_size = block_size;
	else {
		if (!mtd->erasesize) {
			printk(KERN_NOTICE PREFIX "please provide block_size\n");
			kfree(part);
			return;
		}
		part->block_size = mtd->erasesize;
	}

	if (scan_header(part) == 0) {
		part->mbd.size = part->sector_count;
		part->mbd.blksize = SECTOR_SIZE;
		part->mbd.tr = tr;
		part->mbd.devnum = -1;
		if (!(mtd->flags & MTD_WRITEABLE))
			part->mbd.readonly = 1;
		else if (part->errors) {
			printk(KERN_NOTICE PREFIX "'%s': errors found, "
					"setting read-only\n", mtd->name);
			part->mbd.readonly = 1;
		}

		printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
				mtd->name, mtd->type, mtd->flags);

		if (!add_mtd_blktrans_dev((void*)part))
			return;
	}

	kfree(part);
}

static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = (struct partition*)dev;
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	del_mtd_blktrans_dev(dev);
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
	kfree(part);
}

static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}

static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}

module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");