1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright © 2009 - Maxim Levitsky
4 * SmartMedia/xD translation layer
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/random.h>
10 #include <linux/hdreg.h>
11 #include <linux/kthread.h>
12 #include <linux/freezer.h>
13 #include <linux/sysfs.h>
14 #include <linux/bitops.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/nand-ecc-sw-hamming.h>
17 #include "nand/raw/sm_common.h"
18 #include "sm_ftl.h"
19
20
21
22 static struct workqueue_struct *cache_flush_workqueue;
23
24 static int cache_timeout = 1000;
25 module_param(cache_timeout, int, S_IRUGO);
26 MODULE_PARM_DESC(cache_timeout,
27 "Timeout (in ms) for cache flush (1000 ms default");
28
29 static int debug;
30 module_param(debug, int, S_IRUGO | S_IWUSR);
31 MODULE_PARM_DESC(debug, "Debug level (0-2)");
32
33
34 /* ------------------- sysfs attributes ---------------------------------- */
/*
 * Pairs a device attribute with the buffer it exposes.
 * 'data' is a kmalloc'ed string emitted verbatim by sm_attr_show();
 * 'len' is its length in bytes.
 */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;
	int len;
};
40
sm_attr_show(struct device * dev,struct device_attribute * attr,char * buf)41 static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
42 char *buf)
43 {
44 struct sm_sysfs_attribute *sm_attr =
45 container_of(attr, struct sm_sysfs_attribute, dev_attr);
46
47 return sysfs_emit(buf, "%.*s", sm_attr->len, sm_attr->data);
48 }
49
50
#define NUM_ATTRIBUTES 1
/* Byte offset of the vendor string inside the CIS sector */
#define SM_CIS_VENDOR_OFFSET 0x59
/*
 * Build the sysfs attribute group exposed for this FTL device.
 * Currently holds a single read-only "vendor" attribute whose value is the
 * NUL-terminated vendor string copied out of the cached CIS sector.
 * Returns the group, or NULL on any allocation failure.
 */
static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute_group *attr_group;
	struct attribute **attributes;
	struct sm_sysfs_attribute *vendor_attribute;
	char *vendor;

	/* Copy at most the remainder of the CIS small page after the vendor
	 * offset; kstrndup() stops at the first NUL within that window.
	 */
	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
	if (!vendor)
		goto error1;

	/* Initialize sysfs attributes */
	vendor_attribute =
		kzalloc_obj(struct sm_sysfs_attribute);
	if (!vendor_attribute)
		goto error2;

	sysfs_attr_init(&vendor_attribute->dev_attr.attr);

	vendor_attribute->data = vendor;
	vendor_attribute->len = strlen(vendor);
	vendor_attribute->dev_attr.attr.name = "vendor";
	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
	vendor_attribute->dev_attr.show = sm_attr_show;


	/* Create array of pointers to the attributes (NULL-terminated) */
	attributes = kzalloc_objs(struct attribute *, NUM_ATTRIBUTES + 1);
	if (!attributes)
		goto error3;
	attributes[0] = &vendor_attribute->dev_attr.attr;

	/* Finally create the attribute group */
	attr_group = kzalloc_obj(struct attribute_group);
	if (!attr_group)
		goto error4;
	attr_group->attrs = attributes;
	return attr_group;
/* unwind in reverse allocation order */
error4:
	kfree(attributes);
error3:
	kfree(vendor_attribute);
error2:
	kfree(vendor);
error1:
	return NULL;
}
101
/*
 * Free everything sm_create_sysfs_attributes() allocated: each
 * sm_sysfs_attribute (and its backing string), the NULL-terminated
 * attribute pointer array, and the group itself.
 */
static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
{
	struct attribute **attributes = ftl->disk_attributes->attrs;
	int i;

	for (i = 0; attributes[i] ; i++) {

		/* walk back from the raw attribute to our wrapper struct */
		struct device_attribute *dev_attr = container_of(attributes[i],
						struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		kfree(sm_attr->data);
		kfree(sm_attr);
	}

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
}
123
124
125 /* ----------------------- oob helpers -------------------------------------- */
126
/*
 * Decode a 10-bit LBA from its two-byte on-media encoding.
 * Returns the LBA, or -2 if the fixed bits or the parity check fail.
 */
static int sm_get_lba(uint8_t *lba)
{
	/* the five high bits of byte 0 must carry the 0b00010 signature */
	if ((lba[0] & 0xF8) != 0x10)
		return -2;

	/* the whole 16-bit field must have even population count
	 * (parity check; byte order is irrelevant for the weight)
	 */
	if (hweight16(*(uint16_t *)lba) & 1)
		return -2;

	return ((lba[0] & 0x07) << 7) | (lba[1] >> 1);
}
139
140
141 /*
142 * Read LBA associated with block
143 * returns -1, if block is erased
144 * returns -2 if error happens
145 */
sm_read_lba(struct sm_oob * oob)146 static int sm_read_lba(struct sm_oob *oob)
147 {
148 static const uint32_t erased_pattern[4] = {
149 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
150
151 uint16_t lba_test;
152 int lba;
153
154 /* First test for erased block */
155 if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
156 return -1;
157
158 /* Now check if both copies of the LBA differ too much */
159 lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
160 if (lba_test && !is_power_of_2(lba_test))
161 return -2;
162
163 /* And read it */
164 lba = sm_get_lba(oob->lba_copy1);
165
166 if (lba == -2)
167 lba = sm_get_lba(oob->lba_copy2);
168
169 return lba;
170 }
171
/* Encode @lba (with parity bit) into both on-media LBA copies of @oob. */
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
{
	uint8_t enc[2];

	WARN_ON(lba >= 1000);

	/* fixed 0b00010 signature + lba[9:7] in byte 0, lba[6:0] in byte 1 */
	enc[0] = 0x10 | ((lba >> 7) & 0x07);
	enc[1] = (lba << 1) & 0xFF;

	/* set the parity bit so the 16-bit field has even weight */
	if (hweight16(*(uint16_t *)enc) & 0x01)
		enc[1] |= 1;

	oob->lba_copy1[0] = enc[0];
	oob->lba_copy2[0] = enc[0];
	oob->lba_copy1[1] = enc[1];
	oob->lba_copy2[1] = enc[1];
}
187
188
/* Make offset from parts */
/* Combine (zone, block, byte-offset-in-block) into a linear media offset.
 * block == -1 denotes an unmapped FTL entry and yields -1.
 */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	if (block == -1)
		return -1;

	/* zones are spaced SM_MAX_ZONE_SIZE blocks apart on the media */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
202
/* Breaks offset into parts */
/* Inverse of sm_mkoffset() for logical offsets: splits @loffset into the
 * byte offset within a block, the logical block (LBA) within a zone, and
 * the zone number (-1 if past the last zone).
 */
static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
			int *zone, int *block, int *boffset)
{
	u64 offset = loffset;
	*boffset = do_div(offset, ftl->block_size);	/* byte offset in block */
	*block = do_div(offset, ftl->max_lba);		/* logical block in zone */
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
212
213 /* ---------------------- low level IO ------------------------------------- */
214
/*
 * Run software Hamming ECC over both 256-byte halves of a sector,
 * correcting single-bit errors in place.
 * Returns 0 on success, -EIO if either half is uncorrectable.
 */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	uint8_t *stored_ecc[2] = { oob->ecc1, oob->ecc2 };
	uint8_t calc_ecc[3];
	int half;

	for (half = 0; half < 2; half++) {
		ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, calc_ecc,
					 sm_order);
		if (ecc_sw_hamming_correct(buffer, calc_ecc, stored_ecc[half],
					   SM_SMALL_PAGE, sm_order) < 0)
			return -EIO;
		buffer += SM_SMALL_PAGE;
	}
	return 0;
}
233
/* Reads a sector + oob*/
/*
 * Read one sector and its oob area.
 * block == -1 denotes an unmapped FTL entry: the buffer is filled with
 * 0xFF and success is returned. A failed read is retried up to twice
 * more, with a media recheck in between; reads of the CIS itself are
 * never retried to avoid recursing through sm_recheck_media().
 * Returns 0 on success, negative error otherwise.
 */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops = { };
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	if (block == -1) {
		if (buffer)
			memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	/* raw mode on small-page NAND so we can run our own ECC below */
	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		 * won't help anyway
		 */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	 * despite card removal.....
	 */
	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors (bitflip/ecc errors are handled below) */
	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	/* oob-only read: nothing more to verify */
	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC (software Hamming on small-page NAND) */
	if (mtd_is_eccerr(ret) ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
318
/* Writes a sector to media */
/*
 * Write one sector + oob. Refuses to touch the CIS block or block 0 of
 * zone 0, and bails out early when the media is flagged unstable.
 * On failure the media is rechecked and the mtd error is returned.
 */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops = { };
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	BUG_ON(ftl->readonly);

	/* the CIS (and block 0) must never be overwritten */
	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	/* raw mode on small-page NAND: ECC was precomputed by the caller */
	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
363
364 /* ------------------------ block IO ------------------------------------- */
365
/* Write a block using data and lba, and invalid sector bitmap */
/*
 * Write a whole erase block: data from @buf, @lba stamped into every
 * sector's oob, sectors set in @invalid_bitmap marked as bad data.
 * On the first sector-write failure the block is erased and the whole
 * write restarted once; a second failure marks the block bad.
 * Returns 0 on success, -EIO otherwise.
 */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
			  int zone, int block, int lba,
			  unsigned long invalid_bitmap)
{
	bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC);
	struct sm_oob oob;
	int boffset;
	int retry = 0;

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);
restart:
	if (ftl->unstable)
		return -EIO;

	for (boffset = 0; boffset < ftl->block_size;
				boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		/* sectors we couldn't read from the old block stay invalid */
		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" couldn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

			oob.data_status = 0;
		}

		/* small-page NAND: compute software ECC for both halves */
		if (ftl->smallpagenand) {
			ecc_sw_hamming_calculate(buf + boffset,
						 SM_SMALL_PAGE, oob.ecc1,
						 sm_order);

			ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE,
						 SM_SMALL_PAGE, oob.ecc2,
						 sm_order);
		}
		if (!sm_write_sector(ftl, zone, block, boffset,
				     buf + boffset, &oob))
			continue;

		if (!retry) {

			/* If write fails. try to erase the block */
			/* This is safe, because we never write in blocks
			 * that contain valuable data.
			 * This is intended to repair block that are marked
			 * as erased, but that isn't fully erased
			 */

			if (sm_erase_block(ftl, zone, block, 0))
				return -EIO;

			retry = 1;
			goto restart;
		} else {
			sm_mark_block_bad(ftl, zone, block);
			return -EIO;
		}
	}
	return 0;
}
431
432
/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{
	struct sm_oob oob;
	int boffset;

	/* block_status = 0xF0 is the on-media "bad block" marker */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	if (ftl->unstable)
		return;

	if (sm_recheck_media(ftl))
		return;

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	 * any bad blocks till fail completely
	 */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
457
458 /*
459 * Erase a block within a zone
460 * If erase succeeds, it updates free block fifo, otherwise marks block as bad
461 */
sm_erase_block(struct sm_ftl * ftl,int zone_num,uint16_t block,int put_free)462 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
463 int put_free)
464 {
465 struct ftl_zone *zone = &ftl->zones[zone_num];
466 struct mtd_info *mtd = ftl->trans->mtd;
467 struct erase_info erase;
468
469 erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
470 erase.len = ftl->block_size;
471
472 if (ftl->unstable)
473 return -EIO;
474
475 BUG_ON(ftl->readonly);
476
477 if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
478 sm_printk("attempted to erase the CIS!");
479 return -EIO;
480 }
481
482 if (mtd_erase(mtd, &erase)) {
483 sm_printk("erase of block %d in zone %d failed",
484 block, zone_num);
485 goto error;
486 }
487
488 if (put_free)
489 kfifo_in(&zone->free_sectors,
490 (const unsigned char *)&block, sizeof(block));
491
492 return 0;
493 error:
494 sm_mark_block_bad(ftl, zone_num, block);
495 return -EIO;
496 }
497
/* Thoroughly test that block is valid. */
/*
 * Returns 0 if the block holds one consistent LBA, 1 if it was "sliced"
 * (two LBA runs, usually a partial erase) and got erased here, -EIO if it
 * contains three or more distinct LBA runs, -2 on read error.
 */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel no sm_read_lba() result can match,
	 * so the first sector always starts run #1
	 */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted
	 */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* start a new run whenever the LBA value changes */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
537
538 /* ----------------- media scanning --------------------------------- */
/* Geometry lookup table: { media size in MiB, cylinders, heads, sectors },
 * consumed by sm_get_media_info() and reported via sm_getgeo().
 */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },
};
554
555
/* Expected first bytes of a valid CIS sector (matched in sm_read_cis()) */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough
 */
/*
 * Fills in zone layout (zone_count/zone_size/max_lba/block_size), the
 * smallpagenand flag, readonly-ness and CHS geometry from the mtd device
 * size. Returns 0 on success, -ENODEV if the device cannot be a
 * SmartMedia/xD card.
 */
static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			/* writable 2 MiB large-page media is unsupported */
			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	 * sizes. SmartMedia cards exist up to 128 MiB and have same layout
	 */
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use OOB */
	if (!mtd_has_oob(mtd))
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* unknown size: warn and fall back to the largest table geometry */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
656
657 /* Validate the CIS */
sm_read_cis(struct sm_ftl * ftl)658 static int sm_read_cis(struct sm_ftl *ftl)
659 {
660 struct sm_oob oob;
661
662 if (sm_read_sector(ftl,
663 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
664 return -EIO;
665
666 if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
667 return -EIO;
668
669 if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
670 cis_signature, sizeof(cis_signature))) {
671 return 0;
672 }
673
674 return -EIO;
675 }
676
/* Scan the media for the CIS */
/*
 * The CIS lives in the first valid sector of the first valid block, at
 * page offset 0 or SM_SMALL_PAGE within that sector. On success the
 * location is recorded in ftl->cis_block/cis_boffset/cis_page_offset
 * and 0 is returned; -EIO if no CIS is found.
 */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block */
	/* (only the zone's spare blocks, before the data area, are scanned) */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	cis_found = !sm_read_cis(ftl);

	/* signature may also sit in the second small page of the sector */
	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
734
735 /* Basic test to determine if underlying mtd device if functional */
sm_recheck_media(struct sm_ftl * ftl)736 static int sm_recheck_media(struct sm_ftl *ftl)
737 {
738 if (sm_read_cis(ftl)) {
739
740 if (!ftl->unstable) {
741 sm_printk("media unstable, not allowing writes");
742 ftl->unstable = 1;
743 }
744 return -EIO;
745 }
746 return 0;
747 }
748
/* Initialize a FTL zone */
/*
 * Scan every block of a zone: build the LBA -> physical block table,
 * collect erased blocks into the free-sectors fifo, and resolve LBA
 * collisions between blocks. Returns 0 on success, -ENOMEM or -EIO
 * on failure (allocations are rolled back on the error paths).
 */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table
	 * (one 16-bit physical block number per LBA)
	 */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* -1 (0xFFFF) marks an unmapped LBA */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO (2 bytes per block number) */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot
		 */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here
		 */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application
		 */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		 * just put the sector in the FTL table
		 */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
				   zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It not
		 * known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still can be (partially) read
	 */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to */
	/* (rotate the free fifo by a random number of entries) */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
			(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
876
877 /* Get and automatically initialize an FTL mapping for one zone */
sm_get_zone(struct sm_ftl * ftl,int zone_num)878 static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
879 {
880 struct ftl_zone *zone;
881 int error;
882
883 BUG_ON(zone_num >= ftl->zone_count);
884 zone = &ftl->zones[zone_num];
885
886 if (!zone->initialized) {
887 error = sm_init_zone(ftl, zone_num);
888
889 if (error)
890 return ERR_PTR(error);
891 }
892 return zone;
893 }
894
895
896 /* ----------------- cache handling ------------------------------------------*/
897
898 /* Initialize the one block cache */
sm_cache_init(struct sm_ftl * ftl)899 static void sm_cache_init(struct sm_ftl *ftl)
900 {
901 ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
902 ftl->cache_clean = 1;
903 ftl->cache_zone = -1;
904 ftl->cache_block = -1;
905 /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
906 }
907
908 /* Put sector in one block cache */
sm_cache_put(struct sm_ftl * ftl,char * buffer,int boffset)909 static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
910 {
911 memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
912 clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
913 ftl->cache_clean = 0;
914 }
915
916 /* Read a sector from the cache */
sm_cache_get(struct sm_ftl * ftl,char * buffer,int boffset)917 static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
918 {
919 if (test_bit(boffset / SM_SECTOR_SIZE,
920 &ftl->cache_data_invalid_bitmap))
921 return -1;
922
923 memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
924 return 0;
925 }
926
/* Write the cache to hardware */
/*
 * Flush the one-block cache: back-fill sectors the user never wrote by
 * reading them from the old physical block, write the merged block to a
 * fresh block taken from the free fifo, update the FTL table, then erase
 * and free the old block. Returns 0 on success, -EIO otherwise.
 */
static int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
				clear_bit(sector_num,
					&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	 * but for such worn out media it doesn't worth the trouble,
	 * and the dangers
	 */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* on failure, loop back and try the next free block */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
			goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
989
990
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = timer_container_of(ftl, t, timer);

	/* can't flush from timer context; hand off to the workqueue */
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
997
998 /* cache flush work, kicked by timer */
sm_cache_flush_work(struct work_struct * work)999 static void sm_cache_flush_work(struct work_struct *work)
1000 {
1001 struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
1002 mutex_lock(&ftl->mutex);
1003 sm_cache_flush(ftl);
1004 mutex_unlock(&ftl->mutex);
1005 return;
1006 }
1007
1008 /* ---------------- outside interface -------------------------------------- */
1009
/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
		   unsigned long sect_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* 512-byte sectors: sector number -> (zone, logical block, offset) */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);


	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		in_cache = 1;
		if (!sm_cache_get(ftl, buf, boffset))
			goto unlock;
	}

	/* Translate the block and return if doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	/* unmapped blocks read back as all-ones */
	if (block == -1) {
		memset(buf, 0xFF, SM_SECTOR_SIZE);
		goto unlock;
	}

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
		error = -EIO;
		goto unlock;
	}

	/* populate the cache with the sector we just read */
	if (in_cache)
		sm_cache_put(ftl, buf, boffset);
unlock:
	mutex_unlock(&ftl->mutex);
	return error;
}
1055
/* outside interface: write a sector */
static int sm_write(struct mtd_blktrans_dev *dev,
		    unsigned long sec_no, char *buf)
{
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	/* 512-byte sectors: sector number -> (zone, logical block, offset) */
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	timer_delete(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
	if (IS_ERR(zone)) {
		error = PTR_ERR(zone);
		goto unlock;
	}

	/* If entry is not in cache, flush it */
	/* (the cache holds exactly one block; writing elsewhere evicts it) */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);
		if (error)
			goto unlock;

		ftl->cache_block = block;
		ftl->cache_zone = zone_num;
	}

	sm_cache_put(ftl, buf, boffset);
unlock:
	/* (re)arm the delayed flush, even on the error path */
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
	return error;
}
1094
1095 /* outside interface: flush everything */
sm_flush(struct mtd_blktrans_dev * dev)1096 static int sm_flush(struct mtd_blktrans_dev *dev)
1097 {
1098 struct sm_ftl *ftl = dev->priv;
1099 int retval;
1100
1101 mutex_lock(&ftl->mutex);
1102 retval = sm_cache_flush(ftl);
1103 mutex_unlock(&ftl->mutex);
1104 return retval;
1105 }
1106
/* outside interface: device is released */
static void sm_release(struct mtd_blktrans_dev *dev)
{
	struct sm_ftl *ftl = dev->priv;

	/* stop the flush timer and any queued flush work first, so nothing
	 * races with the final synchronous flush below
	 */
	timer_delete_sync(&ftl->timer);
	cancel_work_sync(&ftl->flush_work);
	mutex_lock(&ftl->mutex);
	sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
}
1118
1119 /* outside interface: get geometry */
sm_getgeo(struct mtd_blktrans_dev * dev,struct hd_geometry * geo)1120 static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1121 {
1122 struct sm_ftl *ftl = dev->priv;
1123 geo->heads = ftl->heads;
1124 geo->sectors = ftl->sectors;
1125 geo->cylinders = ftl->cylinders;
1126 return 0;
1127 }
1128
1129 /* external interface: main initialization function */
sm_add_mtd(struct mtd_blktrans_ops * tr,struct mtd_info * mtd)1130 static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1131 {
1132 struct mtd_blktrans_dev *trans;
1133 struct sm_ftl *ftl;
1134
1135 /* Allocate & initialize our private structure */
1136 ftl = kzalloc_obj(struct sm_ftl);
1137 if (!ftl)
1138 goto error1;
1139
1140
1141 mutex_init(&ftl->mutex);
1142 timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
1143 INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1144
1145 /* Read media information */
1146 if (sm_get_media_info(ftl, mtd)) {
1147 dbg("found unsupported mtd device, aborting");
1148 goto error2;
1149 }
1150
1151
1152 /* Allocate temporary CIS buffer for read retry support */
1153 ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1154 if (!ftl->cis_buffer)
1155 goto error2;
1156
1157 /* Allocate zone array, it will be initialized on demand */
1158 ftl->zones = kzalloc_objs(struct ftl_zone, ftl->zone_count);
1159 if (!ftl->zones)
1160 goto error3;
1161
1162 /* Allocate the cache*/
1163 ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1164
1165 if (!ftl->cache_data)
1166 goto error4;
1167
1168 sm_cache_init(ftl);
1169
1170
1171 /* Allocate upper layer structure and initialize it */
1172 trans = kzalloc_obj(struct mtd_blktrans_dev);
1173 if (!trans)
1174 goto error5;
1175
1176 ftl->trans = trans;
1177 trans->priv = ftl;
1178
1179 trans->tr = tr;
1180 trans->mtd = mtd;
1181 trans->devnum = -1;
1182 trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1183 trans->readonly = ftl->readonly;
1184
1185 if (sm_find_cis(ftl)) {
1186 dbg("CIS not found on mtd device, aborting");
1187 goto error6;
1188 }
1189
1190 ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1191 if (!ftl->disk_attributes)
1192 goto error6;
1193 trans->disk_attributes = ftl->disk_attributes;
1194
1195 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1196 (int)(mtd->size / (1024 * 1024)), mtd->index);
1197
1198 dbg("FTL layout:");
1199 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1200 ftl->zone_count, ftl->max_lba,
1201 ftl->zone_size - ftl->max_lba);
1202 dbg("each block consists of %d bytes",
1203 ftl->block_size);
1204
1205
1206 /* Register device*/
1207 if (add_mtd_blktrans_dev(trans)) {
1208 dbg("error in mtdblktrans layer");
1209 goto error6;
1210 }
1211 return;
1212 error6:
1213 kfree(trans);
1214 error5:
1215 kfree(ftl->cache_data);
1216 error4:
1217 kfree(ftl->zones);
1218 error3:
1219 kfree(ftl->cis_buffer);
1220 error2:
1221 kfree(ftl);
1222 error1:
1223 return;
1224 }
1225
1226 /* main interface: device {surprise,} removal */
sm_remove_dev(struct mtd_blktrans_dev * dev)1227 static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1228 {
1229 struct sm_ftl *ftl = dev->priv;
1230 int i;
1231
1232 del_mtd_blktrans_dev(dev);
1233 ftl->trans = NULL;
1234
1235 for (i = 0 ; i < ftl->zone_count; i++) {
1236
1237 if (!ftl->zones[i].initialized)
1238 continue;
1239
1240 kfree(ftl->zones[i].lba_to_phys_table);
1241 kfifo_free(&ftl->zones[i].free_sectors);
1242 }
1243
1244 sm_delete_sysfs_attributes(ftl);
1245 kfree(ftl->cis_buffer);
1246 kfree(ftl->zones);
1247 kfree(ftl->cache_data);
1248 kfree(ftl);
1249 }
1250
/* Block translation layer hooks for SmartMedia/xD ("smblk" devices) */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name = "smblk",
	.major = 0, /* dynamically allocated major number */
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	.readsect = sm_read,
	.writesect = sm_write,

	.flush = sm_flush,
	.release = sm_release,

	.owner = THIS_MODULE,
};
1269
sm_module_init(void)1270 static __init int sm_module_init(void)
1271 {
1272 int error = 0;
1273
1274 cache_flush_workqueue = create_freezable_workqueue("smflush");
1275 if (!cache_flush_workqueue)
1276 return -ENOMEM;
1277
1278 error = register_mtd_blktrans(&sm_ftl_ops);
1279 if (error)
1280 destroy_workqueue(cache_flush_workqueue);
1281 return error;
1282
1283 }
1284
static void __exit sm_module_exit(void)
{
	/*
	 * NOTE(review): the workqueue is destroyed before the translation
	 * layer is deregistered; deregistration (via sm_release) cancels
	 * flush work per device, so this ordering looks intentional, but
	 * confirm no timer can queue work between these two calls.
	 */
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
}
1290
/* Module entry/exit points and metadata */
module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
1297