1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * ms_block.c - Sony MemoryStick (legacy) storage support
4
5 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
6 *
7 * Minor portions of the driver were copied from mspro_block.c which is
8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
9 */
10 #define DRIVER_NAME "ms_block"
11 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/blk-mq.h>
15 #include <linux/memstick.h>
16 #include <linux/idr.h>
17 #include <linux/hdreg.h>
18 #include <linux/delay.h>
19 #include <linux/slab.h>
20 #include <linux/random.h>
21 #include <linux/bitmap.h>
22 #include <linux/scatterlist.h>
23 #include <linux/jiffies.h>
24 #include <linux/workqueue.h>
25 #include <linux/mutex.h>
26 #include "ms_block.h"
27
28 static int debug;
29 static int cache_flush_timeout = 1000;
30 static bool verify_writes;
31
/*
 * Copies a section of 'sg_from', starting at byte offset 'offset' and with
 * length 'len', to another scatterlist of up to 'to_nents' entries.
 */
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	/* Number of bytes of 'sg_from' that were mapped into 'sg_to'.
	 * Note: no data is copied; 'sg_to' entries are made to point at the
	 * same pages as 'sg_from' (a window/view, built with sg_set_page).
	 */
	size_t copied = 0;

	/* Phase 1: skip whole entries until 'offset' falls inside one,
	 * then emit the (possibly partial) first destination entry.
	 */
	while (offset > 0) {
		if (offset >= sg_from->length) {
			/* Offset lies beyond the end of the list. */
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		/* First output entry: tail of this source entry. */
		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	/* Phase 2: map whole source entries while more than one entry's
	 * worth of data remains and destination slots are available.
	 */
	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	/* Phase 3: final partial entry covering the remaining 'len' bytes. */
	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	/* Terminate the destination list at the last entry we filled. */
	sg_mark_end(sg_to);
	return copied;
}
88
/*
 * Compares a section of 'sg', starting at offset 'offset' and with length
 * 'len', to a linear buffer of length 'len' at address 'buffer'.
 * Returns 0 if equal and -1 otherwise.
 */
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
	size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	/* Atomic mapping: this may run with IRQs disabled (see callers). */
	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		/* Skip mapped chunks that lie entirely before 'offset'. */
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		/* Compare the overlap of this chunk with the buffer. */
		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	/* Scatterlist ran out before 'len' bytes were compared: mismatch. */
	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}
125
126
/* Get zone at which block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which the first zone uses
 * 494 and every following zone uses 496.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	/* Zone 0 holds logical blocks 0..493; each later zone holds 496. */
	if (lba >= 494)
		return 1 + (lba - 494) / 496;
	return 0;
}
139
140 /* Get zone of physical block. Trivial */
msb_get_zone_from_pba(int pba)141 static int msb_get_zone_from_pba(int pba)
142 {
143 return pba / MS_BLOCKS_IN_ZONE;
144 }
145
146 /* Debug test to validate free block counts */
msb_validate_used_block_bitmap(struct msb_data * msb)147 static int msb_validate_used_block_bitmap(struct msb_data *msb)
148 {
149 int total_free_blocks = 0;
150 int i;
151
152 if (!debug)
153 return 0;
154
155 for (i = 0; i < msb->zone_count; i++)
156 total_free_blocks += msb->free_block_count[i];
157
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
159 msb->block_count) == total_free_blocks)
160 return 0;
161
162 pr_err("BUG: free block counts don't match the bitmap");
163 msb->read_only = true;
164 return -EINVAL;
165 }
166
167 /* Mark physical block as used */
msb_mark_block_used(struct msb_data * msb,int pba)168 static void msb_mark_block_used(struct msb_data *msb, int pba)
169 {
170 int zone = msb_get_zone_from_pba(pba);
171
172 if (test_bit(pba, msb->used_blocks_bitmap)) {
173 pr_err(
174 "BUG: attempt to mark already used pba %d as used", pba);
175 msb->read_only = true;
176 return;
177 }
178
179 if (msb_validate_used_block_bitmap(msb))
180 return;
181
182 /* No races because all IO is single threaded */
183 __set_bit(pba, msb->used_blocks_bitmap);
184 msb->free_block_count[zone]--;
185 }
186
187 /* Mark physical block as free */
msb_mark_block_unused(struct msb_data * msb,int pba)188 static void msb_mark_block_unused(struct msb_data *msb, int pba)
189 {
190 int zone = msb_get_zone_from_pba(pba);
191
192 if (!test_bit(pba, msb->used_blocks_bitmap)) {
193 pr_err("BUG: attempt to mark already unused pba %d as unused" , pba);
194 msb->read_only = true;
195 return;
196 }
197
198 if (msb_validate_used_block_bitmap(msb))
199 return;
200
201 /* No races because all IO is single threaded */
202 __clear_bit(pba, msb->used_blocks_bitmap);
203 msb->free_block_count[zone]++;
204 }
205
206 /* Invalidate current register window */
msb_invalidate_reg_window(struct msb_data * msb)207 static void msb_invalidate_reg_window(struct msb_data *msb)
208 {
209 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
210 msb->reg_addr.w_length = sizeof(struct ms_id_register);
211 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
212 msb->reg_addr.r_length = sizeof(struct ms_id_register);
213 msb->addr_valid = false;
214 }
215
216 /* Start a state machine */
/*
 * Run the state machine implemented by 'state_func' to completion.
 * Resets the per-run bookkeeping (state, int_polling, exit_error), queues
 * the first request and blocks on mrq_complete until the handler calls
 * msb_exit_state_machine().  Returns the handler's exit error (0 on
 * success).  msb->state == -1 means "no state machine running".
 */
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	/* A previous run must have finished. */
	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	/* The handler must have exited through msb_exit_state_machine(). */
	WARN_ON(msb->state != -1);
	return msb->exit_error;
}
236
237 /* State machines call that to exit */
/*
 * Terminate the currently running state machine with 'error', wake the
 * waiter in msb_run_state_machine() and park the request handler on
 * h_msb_default_bad.  Always returns -ENXIO so handlers can
 * 'return msb_exit_state_machine(...)' to stop the request stream.
 */
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}
253
254 /* read INT register */
/*
 * Prepare a read of the INT register, with a polling timeout.
 * 'timeout' is in msec; -1 selects the default 500 ms.  Returns 1 when a
 * MS_TPC_GET_INT request was queued (caller should return to the request
 * loop), 0 when mrq->data[0] already holds the INT value (auto-INT
 * capable host, or timeout expired — then CMDNAK is faked so the caller
 * bails out with an error).
 */
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		/* First poll of this sequence: arm the deadline. */
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		/* Deadline passed: report CMDNAK without touching the card. */
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		/* Host captured the INT value with the previous transfer. */
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}
280
281 /* Read a register */
msb_read_regs(struct msb_data * msb,int offset,int len)282 static int msb_read_regs(struct msb_data *msb, int offset, int len)
283 {
284 struct memstick_request *req = &msb->card->current_mrq;
285
286 if (msb->reg_addr.r_offset != offset ||
287 msb->reg_addr.r_length != len || !msb->addr_valid) {
288
289 msb->reg_addr.r_offset = offset;
290 msb->reg_addr.r_length = len;
291 msb->addr_valid = true;
292
293 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
294 &msb->reg_addr, sizeof(msb->reg_addr));
295 return 0;
296 }
297
298 memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
299 return 1;
300 }
301
302 /* Write a card register */
msb_write_regs(struct msb_data * msb,int offset,int len,void * buf)303 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
304 {
305 struct memstick_request *req = &msb->card->current_mrq;
306
307 if (msb->reg_addr.w_offset != offset ||
308 msb->reg_addr.w_length != len || !msb->addr_valid) {
309
310 msb->reg_addr.w_offset = offset;
311 msb->reg_addr.w_length = len;
312 msb->addr_valid = true;
313
314 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
315 &msb->reg_addr, sizeof(msb->reg_addr));
316 return 0;
317 }
318
319 memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
320 return 1;
321 }
322
323 /* Handler for absence of IO */
/*
 * Parked request handler installed when no state machine is running;
 * returning -ENXIO tells the memstick core there is no further request.
 */
static int h_msb_default_bad(struct memstick_dev *card,
						struct memstick_request **mrq)
{
	return -ENXIO;
}
329
/*
 * This function is a handler for reads of one page from device.
 * Writes output to msb->current_sg, takes sector address from msb->reg.param
 * Can also be used to read extra data only. Set params accordingly.
 */
static int h_msb_read_page(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	/* A transport-level error aborts the whole state machine. */
	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus it returns request for that.
		 * Then we stay in this state and retry
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If dont actually need to send the int read request (only in
		 * serial mode), then just fall through
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* Card refused the command outright. */
		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		/* Command not finished yet — keep polling the INT register. */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		/* On INT_ERR, read the status register first to classify the
		 * error; otherwise go straight to the OOB read.
		 */
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand source of the INT_ERR */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		fallthrough;

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		fallthrough;

	case MSB_RP_SEND_READ_DATA:
		/* Skip that state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		/* Map one page worth of the caller's scatterlist into a
		 * local two-entry sg and queue the long-data transfer.
		 */
		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			/* Data was delivered; -EUCLEAN tells the caller ECC
			 * had to correct it (caller treats it as success).
			 */
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	/* All states are enumerated above; reaching here is a driver bug. */
	BUG();
}
464
465 /*
466 * Handler of writes of exactly one block.
467 * Takes address from msb->regs.param.
468 * Writes same extra data to blocks, also taken
469 * from msb->regs.extra
470 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
471 * device refuses to take the command or something else
472 */
static int h_msb_write_block(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	/* A transport-level error aborts the whole state machine. */
	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: Jmicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) is broken due to hardware
	 * bug in PIO mode that is used for these TPCs
	 * Therefore split the write
	 */

	case MSB_WB_SEND_WRITE_PARAMS:
		/* Returns 0 when the reg window had to be set up first;
		 * stay in this state and retry.
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;


	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);


		/* for last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;

		}

		/* for non-last page we need BREQ before writing next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		fallthrough;

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		/* Map exactly one page of the caller's sg; a short mapping
		 * means the source ran out of data — fail the write.
		 */
		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		/* Ask the host to latch the INT value with this transfer. */
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		/* Page accepted; advance to the next one. */
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}
580
581 /*
582 * This function is used to send simple IO requests to device that consist
583 * of register write + command
584 */
static int h_msb_send_command(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	/* A transport-level error aborts the whole state machine. */
	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {

	/* HACK: see h_msb_write_block */
	case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
		/* Returns 0 when the reg window had to be set up first;
		 * stay in this state and retry.
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		/* OOB write is optional; controlled by command_need_oob. */
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		/* The actual command byte comes from msb->command_value. */
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* Poll until the card signals command-end (CED). */
		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	/* All states are enumerated above; reaching here is a driver bug. */
	BUG();
}
653
654 /* Small handler for card reset */
/* Small handler for card reset: send MS_CMD_RESET, then confirm. */
static int h_msb_reset(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		/* No INT readback for reset. */
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		/* Command went out without error — done. */
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}
676
677 /* This handler is used to do serial->parallel switch */
/* This handler is used to do serial->parallel switch */
static int h_msb_parallel_switch(struct memstick_dev *card,
					struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		/* Roll back the cached system-register value. */
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		/* Only the one-byte system field is written here. */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set parallel interface on our side + send a dummy request
		 * to see if card responds
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		/* Dummy request answered — the switch worked. */
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}
720
721 static int msb_switch_to_parallel(struct msb_data *msb);
722
/* Reset the card, to guard against hw errors being treated as bad blocks */
/*
 * Reset the card.  With full == true the host is also power-cycled and
 * dropped back to serial mode before the reset command is sent.
 * If the card was in parallel mode before, it is switched back afterwards.
 * Returns 0, -EFAULT (host failure) or -ENODEV (card failure); on failure
 * the device is switched to read-only mode.
 */
static int msb_reset(struct msb_data *msb, bool full)
{

	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error =	host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
			/* Shared failure path for all host set_param errors;
			 * the label sits inside this branch on purpose.
			 */
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}
770
771 /* Attempts to switch interface to parallel mode */
msb_switch_to_parallel(struct msb_data * msb)772 static int msb_switch_to_parallel(struct msb_data *msb)
773 {
774 int error;
775
776 error = msb_run_state_machine(msb, h_msb_parallel_switch);
777 if (error) {
778 pr_err("Switch to parallel failed");
779 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
780 msb_reset(msb, true);
781 return -EFAULT;
782 }
783
784 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
785 return 0;
786 }
787
788 /* Changes overwrite flag on a page */
/*
 * Change the overwrite flag of one page: stage the address/mode registers
 * and the new flag, then run the send-command state machine with
 * MS_CMD_BLOCK_WRITE in overwrite mode.
 */
static int msb_set_overwrite_flag(struct msb_data *msb,
						u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
							flag, pba, page);

	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;

	return msb_run_state_machine(msb, h_msb_send_command);
}
806
msb_mark_bad(struct msb_data * msb,int pba)807 static int msb_mark_bad(struct msb_data *msb, int pba)
808 {
809 pr_notice("marking pba %d as bad", pba);
810 msb_reset(msb, true);
811 return msb_set_overwrite_flag(
812 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
813 }
814
msb_mark_page_bad(struct msb_data * msb,int pba,int page)815 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
816 {
817 dbg("marking page %d of pba %d as bad", page, pba);
818 msb_reset(msb, true);
819 return msb_set_overwrite_flag(msb,
820 pba, page, ~MEMSTICK_OVERWRITE_PGST0);
821 }
822
823 /* Erases one physical block */
/*
 * Erases one physical block.  Tries up to two times (resetting the card
 * between attempts); a persistent failure marks the block bad.
 * Returns the last state-machine error (0 on success).
 */
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;


		error = msb_run_state_machine(msb, h_msb_send_command);
		/* Stop on success, or when even the recovery reset fails. */
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	}

	/* NOTE(review): this runs even on the error path above, so a block
	 * that failed to erase is still logged as "erase success", marked
	 * unused and flagged erased — confirm this is intentional.
	 */
	dbg_verbose("erase success, marking pba %d as unused", pba);
	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}
856
857 /* Reads one page from device */
/*
 * Reads one page from the device into 'sg' at byte offset 'offset'.
 * 'extra', when non-NULL, receives the page's OOB data.
 * Unmapped sectors (pba == MS_BLOCK_INVALID) are synthesized as all-0xFF.
 * Retries up to two times with a card reset in between; an uncorrectable
 * ECC error marks the page bad and returns -EBADMSG.  Correctable errors
 * are logged and treated as success.
 */
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
					struct scatterlist *sg,  int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		/* Fill one page worth of the sg with 0xFF; ATOMIC mapping
		 * requires IRQs off for the duration.
		 */
		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
				SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			/* Skip chunks fully before 'offset'. */
			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		/* Offset was never consumed: the sg was too short. */
		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);


		/* Correctable ECC error: data is good, just log it. */
		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		/* Stop on success, or when even the recovery reset fails. */
		if (!error || msb_reset(msb, true))
			break;

	}

	/* Mark bad pages */
	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		/* Only mark the page if its status bit isn't already clear. */
		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}
946
947 /* Reads oob of page only */
/*
 * Reads only the OOB (extra data) of one page into *extra.
 * Uses the read-page state machine in MEMSTICK_CP_EXTRA mode, so no data
 * transfer happens.  Correctable ECC errors are logged and reported as
 * success; other errors are returned as-is.
 */
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);

	/* Valid pba range is [0, block_count).  The old check used '>',
	 * which let pba == block_count (one past the end) through;
	 * use '>=' to match msb_read_page(), and reject before staging
	 * any card registers.
	 */
	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	/* Correctable ECC error: OOB data is good, just log it. */
	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}
974
975 /* Reads a block and compares it with data contained in scatterlist orig_sg */
/*
 * Reads a whole block back into msb->block_buffer, page by page, and
 * compares it against the data in 'orig_sg' at byte offset 'offset'.
 * Returns 0 on match, a read error, or -EIO on mismatch.
 */
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg,  int offset)
{
	struct scatterlist sg;
	int page, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	for (page = 0; page < msb->pages_in_block; page++) {
		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}
998
999 /* Writes exactly one block + oob */
/*
 * Writes exactly one block (data from 'sg' at byte 'offset') plus OOB to
 * physical block 'pba', recording logical address 'lba' in the OOB.
 * Validates the pba/lba pair, optionally verifies the written data, and
 * on failure erases the block and retries once.
 */
static int msb_write_block(struct msb_data *msb,
			u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err(
			"BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err(
		"BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	/* A logical block must live in its own zone. */
	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	/* Boot blocks hold the card's metadata and are never rewritten. */
	if (pba == msb->boot_block_locations[0] ||
		pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		/* Stage registers: whole-block write starting at page 0. */
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* Sector we just wrote to is assumed erased since its pba
		 * was erased. If it wasn't erased, write will succeed
		 * and will just clear the bits that were set in the block
		 * thus test that what we have written,
		 * matches what we expect.
		 * We do trust the blocks that we erased
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		/* One retry only, and only if the recovery reset worked. */
		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}
1078
1079 /* Finds a free block for write replacement */
/*
 * Finds a free block in 'zone' for write replacement, chosen at random
 * among the zone's free blocks (simple wear leveling).  The block is
 * marked used before it is returned.  Returns MS_BLOCK_INVALID (and
 * switches to read-only mode) when the zone has no free blocks left or
 * the bitmap scan goes wrong.
 */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	/* Random index among the zone's free blocks. */
	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	/* Walk the used-blocks bitmap to the pos-th zero bit in the zone. */
	pba = find_next_zero_bit(msb->used_blocks_bitmap,
							msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	/* Scan ran past the zone: counters and bitmap disagree. */
	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
1116
/*
 * Rewrite logical block @lba with the data in @sg (starting at @offset):
 * allocate a fresh physical block in the lba's zone, write there, then
 * erase the old physical block and update the mapping.  A media error
 * (-EBADMSG) marks the chosen block bad and retries with another block
 * (up to 3 attempts); any other failure switches the card to read-only.
 */
static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		/* Clear UDST on the old copy first, so that a crash mid-update
		 * leaves it recognizably unstable for the next FTL scan.
		 */
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
								new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			/* Media error: retire this block, try another one. */
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	/* Loop exhaustion reaches here with error == -EBADMSG. */
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}
1164
1165 /* Converts endiannes in the boot block for easy use */
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	/* Convert, in place, every multi-byte boot-page field from the
	 * on-media big-endian layout to CPU byte order, so later code can
	 * read the attributes directly (see msb_init_card()).
	 */
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}
1191
/*
 * Scan the first MS_BLOCK_MAX_BOOT_ADDR physical blocks for up to two boot
 * blocks, recording their locations and caching their first pages (converted
 * to CPU endianness) in msb->boot_page[0..1].
 * Returns 0 if at least one boot block was found, -EIO/-ENOMEM otherwise.
 */
static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		/* Room for both (primary and backup) boot pages. */
		page = kmalloc_objs(struct ms_boot_page, 2);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	/* Temporary block count; the real value is read from the boot page. */
	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
									pba);
			continue;
		}

		/* The page is still big-endian at this point. */
		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		/* Next hit (if any) goes into the second buffer slot. */
		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}
1253
/*
 * Read the factory bad-block table referenced by boot block @block_nr
 * (0 = primary, 1 = backup) and mark every listed block as used so the
 * FTL never allocates it.  Returns 0 on success or a negative errno.
 */
static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	/* Table location is given relative to the end of the boot page. */
	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	/* Round up so that whole pages are read. */
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the buffer */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err(
			"bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad block table.
	 * NOTE(review): the loop starts at page_offset, a BYTE offset, yet
	 * indexes a u16 array and stops at data_size / sizeof(u16).  This
	 * looks dimensionally inconsistent - confirm against the MemoryStick
	 * boot-block layout before changing.
	 */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
								bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap))  {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}
1332
msb_ftl_initialize(struct msb_data * msb)1333 static int msb_ftl_initialize(struct msb_data *msb)
1334 {
1335 int i;
1336
1337 if (msb->ftl_initialized)
1338 return 0;
1339
1340 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1341 msb->logical_block_count = msb->zone_count * 496 - 2;
1342
1343 msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1344 msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1345 msb->lba_to_pba_table =
1346 kmalloc_array(msb->logical_block_count, sizeof(u16),
1347 GFP_KERNEL);
1348
1349 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1350 !msb->erased_blocks_bitmap) {
1351 bitmap_free(msb->used_blocks_bitmap);
1352 bitmap_free(msb->erased_blocks_bitmap);
1353 kfree(msb->lba_to_pba_table);
1354 return -ENOMEM;
1355 }
1356
1357 for (i = 0; i < msb->zone_count; i++)
1358 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1359
1360 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1361 msb->logical_block_count * sizeof(u16));
1362
1363 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1364 msb->zone_count, msb->logical_block_count);
1365
1366 msb->ftl_initialized = true;
1367 return 0;
1368 }
1369
/*
 * Scan the whole media and build the LBA -> PBA translation table from the
 * out-of-band (extra data) area of each physical block.  Bad, system, and
 * temporary blocks are marked used; stale duplicates of a logical block are
 * erased using the UDST (update-in-progress) flag to pick the stable copy.
 * Returns 0 on success or a negative errno.
 */
static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	/* Per-block copy of the overwrite flags, kept for collision handling. */
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
			pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Already marked used by the factory bad-block table. */
		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the page if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice(
			"oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
							pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* Unmapped OOB address means the block carries no data. */
		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has LBA not according to zoning*/
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
								pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		/* Two physical blocks claim the same LBA: an interrupted
		 * update.  The copy with UDST still set (unstable) loses.
		 */
		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
								other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		/* Neither copy is stable: arbitrarily keep the newly found one. */
		pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
				pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}
1497
/* Cache-flush timer expired: ask the IO worker to flush the write cache. */
static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = timer_container_of(msb, t, cache_flush_timer);

	/* Set the flag before kicking the work; msb_io_work() checks it
	 * ahead of the request queue.
	 */
	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}
1505
1506
/*
 * Drop the write cache contents without writing them to the media:
 * stop the flush timer, invalidate the cached LBA, and clear the
 * per-page validity bitmap.  No-op if the cache is already empty.
 */
static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	timer_delete_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}
1518
msb_cache_init(struct msb_data * msb)1519 static int msb_cache_init(struct msb_data *msb)
1520 {
1521 timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1522
1523 if (!msb->cache)
1524 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1525 if (!msb->cache)
1526 return -ENOMEM;
1527
1528 msb_cache_discard(msb);
1529 return 0;
1530 }
1531
/*
 * Write the cached logical block back to the media.  Pages missing from
 * the cache are first read in from the current physical block; pages that
 * cannot be read are written anyway and then flagged as damaged in their
 * overwrite flags.  Returns 0 on success or a negative errno.
 */
static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
						pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache , msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		/* Page flagged bad on the media: leave its cache slot invalid
		 * so it is marked damaged again after the write below.
		 */
		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
			MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	/* The update moved the data to a new physical block. */
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba , page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}
1604
/*
 * Copy one page of data from @sg (at @offset) into the write cache slot for
 * (@lba, @page).  If @add_to_cache_only is set, the data is only recorded
 * when @lba is already the cached block (used by msb_cache_read() to
 * opportunistically populate the cache); otherwise a different cached block
 * is flushed first and the cache is retargeted to @lba, arming the flush
 * timer.  Returns 0 on success or a negative errno.
 */
static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	/* Cache miss in add-only mode: silently do nothing. */
	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write different block */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba  == MS_BLOCK_INVALID) {
		msb->cache_block_lba  = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	/* Narrow the caller's sg list down to this one page before copying. */
	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}
1645
/*
 * Read one page of (@lba, @page) into @sg at @offset: served from the write
 * cache when the page is cached, otherwise read from the media and then
 * offered to the cache (add_to_cache_only, so the cache target is never
 * switched by a read).  Returns 0 on success or a negative errno.
 */
static int msb_cache_read(struct msb_data *msb, int lba,
				int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
			test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
							lba, pba, page);

		/* Narrow the caller's sg list down to this one page. */
		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
							msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
							lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		/* Best effort: populate the cache; errors are ignored. */
		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}
1677
1678 /* Emulated geometry table
1679 * This table content isn't that importaint,
1680 * One could put here different values, providing that they still
1681 * cover whole disk.
1682 * 64 MB entry is what windows reports for my 64M memstick
1683 */
1684
static const struct chs_entry chs_table[] = {
/*        size sectors cylinders heads */
	{ 4,    16,    247,       2  },
	{ 8,    16,    495,       2  },
	{ 16,   16,    495,       4  },
	{ 32,   16,    991,       4  },
	{ 64,   16,    991,       8  },
	{128,   16,    991,      16  },
	{ 0 }	/* terminator: msb_init_card() stops at size == 0 */
};
1695
1696 /* Load information about the card */
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	/* NOTE(review): this range collapses to class == MEMSTICK_CLASS_ROM;
	 * confirm whether other read-only classes were meant to be included.
	 */
	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_ROM)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Due to a bug in Jmicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	/* Provisional page size, large enough to read the boot page;
	 * the true value comes from the boot page attributes below.
	 */
	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save interesting attributes from boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	/* attr.block_size is in units of 2 pages. */
	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if ((size_t)msb->page_size > PAGE_SIZE) {
		/* this isn't supported by linux at all, anyway*/
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	/* Pick the emulated CHS geometry matching the raw media size. */
	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;


	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* *drum roll* Scan the media */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;

}
1809
/*
 * Write @len bytes from @sg starting at (@lba, @page).  Whole-block-aligned
 * spans bypass the cache and are written directly with msb_update_block();
 * everything else goes through the write cache one page at a time.
 * *sucessfuly_written counts bytes completed before any failure.
 */
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
{
	int error = 0;
	off_t offset = 0;
	*sucessfuly_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			/* Direct write supersedes any cached copy. */
			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*sucessfuly_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*sucessfuly_written += msb->page_size;

		/* Advance to the next page, wrapping into the next block. */
		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}
1849
msb_do_read_request(struct msb_data * msb,int lba,int page,struct scatterlist * sg,int len,int * sucessfuly_read)1850 static int msb_do_read_request(struct msb_data *msb, int lba,
1851 int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1852 {
1853 int error = 0;
1854 int offset = 0;
1855 *sucessfuly_read = 0;
1856
1857 while (offset < len) {
1858
1859 error = msb_cache_read(msb, lba, page, sg, offset);
1860 if (error)
1861 return error;
1862
1863 offset += msb->page_size;
1864 *sucessfuly_read += msb->page_size;
1865
1866 page++;
1867 if (page == msb->pages_in_block) {
1868 page = 0;
1869 lba++;
1870 }
1871 }
1872 return 0;
1873 }
1874
/*
 * The single IO worker: flushes the write cache when asked and processes
 * the one outstanding blk-mq request (msb->req) to completion.  All access
 * to msb->req and need_flush_cache is serialized by msb->q_lock.
 */
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		/* A pending cache flush takes priority over the request. */
		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		/* process the request */
		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(req, sg);

		lba = blk_rq_pos(req);

		/* Convert the 512-byte sector position into a device page
		 * number, then split it into block index (lba) and the
		 * page within that block (the remainder).
		 */
		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		/* Complete the bytes that did transfer; if that was all of
		 * them, finish the request and clear the slot.
		 */
		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}
1941
1942 static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */
1943 static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1944
msb_data_clear(struct msb_data * msb)1945 static void msb_data_clear(struct msb_data *msb)
1946 {
1947 kfree(msb->boot_page);
1948 bitmap_free(msb->used_blocks_bitmap);
1949 bitmap_free(msb->erased_blocks_bitmap);
1950 kfree(msb->lba_to_pba_table);
1951 kfree(msb->cache);
1952 msb->card = NULL;
1953 }
1954
msb_bd_getgeo(struct gendisk * disk,struct hd_geometry * geo)1955 static int msb_bd_getgeo(struct gendisk *disk,
1956 struct hd_geometry *geo)
1957 {
1958 struct msb_data *msb = disk->private_data;
1959 *geo = msb->geometry;
1960 return 0;
1961 }
1962
/*
 * block_device_operations ->free_disk: called when the last reference to
 * the gendisk is dropped.  Releases the disk number and the msb_data
 * itself (its members were already freed by msb_data_clear()).
 */
static void msb_bd_free_disk(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);

	kfree(msb);
}
1973
/*
 * blk-mq ->queue_rq: accept at most one request at a time (stored in
 * msb->req) and hand it to the IO worker.  Requests against a removed
 * card fail immediately; a busy slot returns BLK_STS_DEV_RESOURCE so the
 * block layer retries later.
 */
static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}
2009
msb_check_card(struct memstick_dev * card)2010 static int msb_check_card(struct memstick_dev *card)
2011 {
2012 struct msb_data *msb = memstick_get_drvdata(card);
2013
2014 return (msb->card_dead == 0);
2015 }
2016
/*
 * memstick ->stop callback: quiesce all IO.  Stops the blk-mq queues,
 * marks the workqueue stopped, cancels the cache-flush timer, drains the
 * IO worker, and requeues any request it left unfinished.
 */
static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Order matters: the timer could otherwise re-queue io_work. */
	timer_delete_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}
2039
/*
 * memstick ->start callback: resume IO after msb_stop() (or first start
 * from msb_init_disk()).  No-op unless the queue was stopped on a live
 * card.  Also schedules a harmless cache flush.
 */
static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick cache flush anyway, its harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);

}
2065
/* Block device operations: geometry query and final disk teardown. */
static const struct block_device_operations msb_bdops = {
	.owner				= THIS_MODULE,
	.getgeo				= msb_bd_getgeo,
	.free_disk			= msb_bd_free_disk,
};
2071
/* blk-mq dispatch: a single-queue setup driven by msb_queue_rq(). */
static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq	= msb_queue_rq,
};
2075
2076 /* Registers the block device */
/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct queue_limits lim = {
		.logical_block_size	= msb->page_size,
		.max_hw_sectors		= MS_BLOCK_MAX_PAGES,
		.max_segments		= MS_BLOCK_MAX_SEGS,
		.max_segment_size	= MS_BLOCK_MAX_PAGES * msb->page_size,
	};
	int rc;
	unsigned long capacity;

	/* Reserve a disk number (bounded to 256 devices). */
	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id  < 0)
		return msb->disk_id;

	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, 0);
	if (rc)
		goto out_release_id;

	msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
	if (IS_ERR(msb->disk)) {
		rc = PTR_ERR(msb->disk);
		goto out_free_tag_set;
	}
	msb->queue = msb->disk->queue;

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;

	/* Capacity in 512-byte sectors, from the FTL's logical geometry. */
	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	/* Ordered workqueue: one request at a time, see msb_io_work(). */
	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	if (!msb->io_queue) {
		rc = -ENOMEM;
		goto out_cleanup_disk;
	}

	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_destroy_workqueue;
	dbg("Disk added");
	return 0;

out_destroy_workqueue:
	destroy_workqueue(msb->io_queue);
out_cleanup_disk:
	put_disk(msb->disk);
out_free_tag_set:
	blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}
2147
/*
 * Driver probe: allocate per-card state, initialize the card (FTL tables,
 * media scan), and register the block device.  On failure everything
 * allocated so far is torn down.
 */
static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc_obj(struct msb_data);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}
2177
/*
 * Driver remove: stop IO, mark the card dead so queued/new requests fail
 * fast, unregister the disk, and free per-card data.  The msb_data struct
 * itself is freed later by msb_bd_free_disk() via put_disk().
 */
static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	/* Restart the queues so pending requests get errored out. */
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	put_disk(msb->disk);
	memstick_set_drvdata(card, NULL);
}
2206
2207 #ifdef CONFIG_PM
2208
/*
 * msb_suspend - PM suspend hook: quiesce all I/O before the host sleeps.
 * @state is unused; msb_stop() parks the queue and the io worker.
 */
static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}
2214
/*
 * msb_resume - PM resume hook.
 *
 * Without CONFIG_MEMSTICK_UNSAFE_RESUME the card is simply declared dead.
 * Otherwise the media is re-probed into a throwaway msb_data and compared
 * against the pre-suspend state (block size, boot page, LBA table, used
 * block bitmap); only if everything matches is the old state kept alive.
 */
static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc_obj(struct msb_data);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	/*
	 * NOTE(review): this initializes the OLD msb's scatterlist while
	 * new_msb is the structure msb_init_card() is about to use —
	 * looks like it should be new_msb->prealloc_sg; confirm before
	 * changing.
	 */
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	/*
	 * NOTE(review): the compare length is logical_block_count BYTES;
	 * if table entries are wider than one byte this checks only part
	 * of the table — verify the entry type in ms_block.h.
	 */
	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		!bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
							msb->block_count))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	/* Reinstall the pre-suspend state; the probe copy is discarded. */
	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
2273 #else
2274
2275 #define msb_suspend NULL
2276 #define msb_resume NULL
2277
2278 #endif /* CONFIG_PM */
2279
/*
 * Media we bind to: every legacy MemoryStick storage class (flash, ROM,
 * read-only, write-protected) plus Duo storage media.
 */
static const struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2298
2299
/* Driver registration with the MemoryStick core bus. */
static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};
2311
msb_init(void)2312 static int __init msb_init(void)
2313 {
2314 int rc = memstick_register_driver(&msb_driver);
2315
2316 if (rc)
2317 pr_err("failed to register memstick driver (error %d)\n", rc);
2318
2319 return rc;
2320 }
2321
/*
 * Module exit: unregister first so no new probes can touch the idr,
 * then tear down the disk-id allocator.
 */
static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}
2327
module_init(msb_init);
module_exit(msb_exit);

/* Module parameters; cache_flush_timeout and verify_writes are read-only. */
module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
				"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");