1 // SPDX-License-Identifier: GPL-2.0-only
2 /* vmu-flash.c
3 * Driver for SEGA Dreamcast Visual Memory Unit
4 *
5 * Copyright (c) Adrian McMenamin 2002 - 2009
6 * Copyright (c) Paul Mundt 2001
7 */
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/sched.h>
11 #include <linux/delay.h>
12 #include <linux/maple.h>
13 #include <linux/mtd/mtd.h>
14 #include <linux/mtd/map.h>
15
/* Single-block read cache; considered fresh for up to 1 second */
struct vmu_cache {
	unsigned char *buffer;		/* Cache */
	unsigned int block;		/* Which block was cached */
	unsigned long jiffies_atc;	/* When was it cached? */
	int valid;			/* non-zero while cached data may be used */
};
22
/* Links an mtd partition (via mtd->priv) back to its maple device */
struct mdev_part {
	struct maple_device *mdev;	/* owning maple device */
	int partition;			/* partition index on that device */
};
27
/* Per-partition details, filled in by vmu_queryblocks() */
struct vmupart {
	u16 user_blocks;	/* blocks available for user data */
	u16 root_block;		/* block number of the root block */
	u16 numblocks;		/* total block count (root_block + 1) */
	char *name;		/* mtd device name, "vmu<port>.<unit>.<part>" */
	struct vmu_cache *pcache;	/* single-block read cache */
};
35
/* Per-card state, one instance per connected VMU */
struct memcard {
	u16 tempA;		/* scratch: user block count from last GETMINFO */
	u16 tempB;		/* scratch: root block number from last GETMINFO */
	u32 partitions;		/* number of partitions on the device */
	u32 blocklen;		/* bytes per block */
	u32 writecnt;		/* write phases per block */
	u32 readcnt;		/* read phases per block */
	u32 removable;		/* removable-media flag from the info word */
	int partition;		/* partition currently being queried/registered */
	int read;		/* NOTE(review): not referenced in this file - confirm before removing */
	unsigned char *blockread;	/* destination buffer for in-flight phase read */
	struct vmupart *parts;	/* array of per-partition details */
	struct mtd_info *mtd;	/* array of per-partition mtd devices */
};
50
/* Result of translating a byte offset into block coordinates */
struct vmu_block {
	unsigned int num; /* block number */
	unsigned int ofs; /* block offset */
};
55
ofs_to_block(unsigned long src_ofs,struct mtd_info * mtd,int partition)56 static struct vmu_block *ofs_to_block(unsigned long src_ofs,
57 struct mtd_info *mtd, int partition)
58 {
59 struct vmu_block *vblock;
60 struct maple_device *mdev;
61 struct memcard *card;
62 struct mdev_part *mpart;
63 int num;
64
65 mpart = mtd->priv;
66 mdev = mpart->mdev;
67 card = maple_get_drvdata(mdev);
68
69 if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
70 goto failed;
71
72 num = src_ofs / card->blocklen;
73 if (num > card->parts[partition].numblocks)
74 goto failed;
75
76 vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
77 if (!vblock)
78 goto failed;
79
80 vblock->num = num;
81 vblock->ofs = src_ofs % card->blocklen;
82 return vblock;
83
84 failed:
85 return NULL;
86 }
87
88 /* Maple bus callback function for reads */
vmu_blockread(struct mapleq * mq)89 static void vmu_blockread(struct mapleq *mq)
90 {
91 struct maple_device *mdev;
92 struct memcard *card;
93
94 mdev = mq->dev;
95 card = maple_get_drvdata(mdev);
96 /* copy the read in data */
97
98 if (unlikely(!card->blockread))
99 return;
100
101 memcpy(card->blockread, mq->recvbuf->buf + 12,
102 card->blocklen/card->readcnt);
103
104 }
105
/* Interface with maple bus to read blocks
 * caching the results so that other parts
 * of the driver can access block reads
 *
 * Reads block @num into @buf (which must hold card->blocklen bytes),
 * one phase at a time, and mirrors the data into the partition's
 * read cache.  Returns 0 on success or a negative errno.
 */
static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error = 0, x, wait;
	unsigned char *blockread = NULL;
	struct vmu_cache *pcache;
	__be32 sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	pcache = card->parts[partition].pcache;
	/* Invalidate up front; the cache only becomes valid again once
	 * every phase of this read completes */
	pcache->valid = 0;

	/* prepare the cache for this block */
	if (!pcache->buffer) {
		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
		if (!pcache->buffer) {
			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
				" to lack of memory\n", mdev->port,
				mdev->unit);
			error = -ENOMEM;
			goto outB;
		}
	}

	/*
	 * Reads may be phased - again the hardware spec
	 * supports this - though may not be any devices in
	 * the wild that implement it, but we will here
	 */
	for (x = 0; x < card->readcnt; x++) {
		/* Command word: partition, phase, block number */
		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);

		/* Wait (up to 1s) for the device to become free */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				dev_notice(&mdev->dev, "VMU at (%d, %d)"
					" is busy\n", mdev->port, mdev->unit);
				error = -EAGAIN;
				goto outB;
			}
		}

		atomic_set(&mdev->busy, 1);
		/* Bounce buffer for one phase; vmu_blockread() fills it */
		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
		if (!blockread) {
			error = -ENOMEM;
			atomic_set(&mdev->busy, 0);
			goto outB;
		}
		card->blockread = blockread;

		maple_getcond_callback(mdev, vmu_blockread, 0,
			MAPLE_FUNC_MEMCARD);
		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BREAD, 2, &sendbuf);
		/* Very long timeouts seem to be needed when box is stressed */
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			(atomic_read(&mdev->busy) == 0 ||
			atomic_read(&mdev->busy) == 2), HZ * 3);
		/*
		 * MTD layer does not handle hotplugging well
		 * so have to return errors when VMU is unplugged
		 * in the middle of a read (busy == 2)
		 */
		if (error || atomic_read(&mdev->busy) == 2) {
			if (atomic_read(&mdev->busy) == 2)
				error = -ENXIO;
			atomic_set(&mdev->busy, 0);
			card->blockread = NULL;
			goto outA;
		}
		if (wait == 0 || wait == -ERESTARTSYS) {
			/* Timed out or interrupted: tear the queued packet
			 * back out of the maple queue by hand */
			card->blockread = NULL;
			atomic_set(&mdev->busy, 0);
			error = -EIO;
			list_del_init(&(mdev->mq->list));
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			if (wait == -ERESTARTSYS) {
				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
					" interrupted on block 0x%X\n",
					mdev->port, mdev->unit, num);
			} else
				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
					" timed out on block 0x%X\n",
					mdev->port, mdev->unit, num);
			goto outA;
		}

		/* Copy this phase into both the caller's buffer ... */
		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
			card->blocklen/card->readcnt);

		/* ... and the partition's read cache */
		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
			card->blockread, card->blocklen/card->readcnt);
		card->blockread = NULL;
		pcache->block = num;
		pcache->jiffies_atc = jiffies;
		/* Cache marked valid each phase; only fully valid data is
		 * ever left behind because any failure above returns early
		 * with valid still 0 */
		pcache->valid = 1;
		kfree(blockread);
	}

	return error;

outA:
	kfree(blockread);
outB:
	return error;
}
224
/* communicate with maple bus for phased writing
 *
 * Writes block @num from @buf, one phase at a time.  Each phase sends
 * a 4-byte command word followed by blocklen/writecnt bytes of data.
 * Returns card->blocklen on success or a negative errno.
 */
static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
	struct mtd_info *mtd)
{
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	int partition, error, locking, x, phaselen, wait;
	__be32 *sendbuf;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);

	phaselen = card->blocklen/card->writecnt;

	/* One phase of payload plus the 4-byte command word */
	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
	if (!sendbuf) {
		error = -ENOMEM;
		goto fail_nosendbuf;
	}
	for (x = 0; x < card->writecnt; x++) {
		/* Command word: partition, phase, block number */
		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
		/* wait until the device is not busy doing something else
		 * or 1 second - which ever is longer */
		if (atomic_read(&mdev->busy) == 1) {
			wait_event_interruptible_timeout(mdev->maple_wait,
				atomic_read(&mdev->busy) == 0, HZ);
			if (atomic_read(&mdev->busy) == 1) {
				error = -EBUSY;
				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
					"failed - device is busy\n",
					mdev->port, mdev->unit);
				goto fail_nolock;
			}
		}
		atomic_set(&mdev->busy, 1);

		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
		wait = wait_event_interruptible_timeout(mdev->maple_wait,
			atomic_read(&mdev->busy) == 0, HZ/10);
		if (locking) {
			/* Packet was never queued */
			error = -EIO;
			atomic_set(&mdev->busy, 0);
			goto fail_nolock;
		}
		if (atomic_read(&mdev->busy) == 2) {
			/* busy == 2 means the VMU was unplugged mid-write;
			 * clear the flag and carry on - the final phases
			 * will fail on their own */
			atomic_set(&mdev->busy, 0);
		} else if (wait == 0 || wait == -ERESTARTSYS) {
			/* Timed out or interrupted: tear the queued packet
			 * back out of the maple queue by hand */
			error = -EIO;
			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
				" 0x%X at phase %d failed: could not"
				" communicate with VMU", mdev->port,
				mdev->unit, num, x);
			atomic_set(&mdev->busy, 0);
			kfree(mdev->mq->sendbuf);
			mdev->mq->sendbuf = NULL;
			list_del_init(&(mdev->mq->list));
			goto fail_nolock;
		}
	}
	kfree(sendbuf);

	return card->blocklen;

fail_nolock:
	kfree(sendbuf);
fail_nosendbuf:
	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
		mdev->unit);
	return error;
}
300
/* mtd function to simulate reading byte by byte
 *
 * Reads the whole block containing @ofs (which also refreshes the
 * partition's read cache as a side effect) and returns the byte at
 * @ofs.  On failure *retval is set non-zero (1 = no memory for the
 * bounce buffer, 2 = block read failed, 3 = offset translation
 * failed) and the returned value is a negative errno truncated
 * through unsigned char - callers must test *retval, not the byte.
 */
static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
	struct mtd_info *mtd)
{
	struct vmu_block *vblock;
	struct memcard *card;
	struct mdev_part *mpart;
	struct maple_device *mdev;
	unsigned char *buf, ret;
	int partition, error;

	mpart = mtd->priv;
	mdev = mpart->mdev;
	partition = mpart->partition;
	card = maple_get_drvdata(mdev);
	*retval = 0;

	/* Bounce buffer for the whole block */
	buf = kmalloc(card->blocklen, GFP_KERNEL);
	if (!buf) {
		*retval = 1;
		ret = -ENOMEM;
		goto finish;
	}

	vblock = ofs_to_block(ofs, mtd, partition);
	if (!vblock) {
		*retval = 3;
		ret = -ENOMEM;
		goto out_buf;
	}

	error = maple_vmu_read_block(vblock->num, buf, mtd);
	if (error) {
		ret = error;
		*retval = 2;
		goto out_vblock;
	}

	ret = buf[vblock->ofs];

out_vblock:
	kfree(vblock);
out_buf:
	kfree(buf);
finish:
	return ret;
}
348
349 /* mtd higher order function to read flash */
vmu_flash_read(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)350 static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
351 size_t *retlen, u_char *buf)
352 {
353 struct maple_device *mdev;
354 struct memcard *card;
355 struct mdev_part *mpart;
356 struct vmu_cache *pcache;
357 struct vmu_block *vblock;
358 int index = 0, retval, partition, leftover, numblocks;
359 unsigned char cx;
360
361 mpart = mtd->priv;
362 mdev = mpart->mdev;
363 partition = mpart->partition;
364 card = maple_get_drvdata(mdev);
365
366 numblocks = card->parts[partition].numblocks;
367 if (from + len > numblocks * card->blocklen)
368 len = numblocks * card->blocklen - from;
369 if (len == 0)
370 return -EIO;
371 /* Have we cached this bit already? */
372 pcache = card->parts[partition].pcache;
373 do {
374 vblock = ofs_to_block(from + index, mtd, partition);
375 if (!vblock)
376 return -ENOMEM;
377 /* Have we cached this and is the cache valid and timely? */
378 if (pcache->valid &&
379 time_before(jiffies, pcache->jiffies_atc + HZ) &&
380 (pcache->block == vblock->num)) {
381 /* we have cached it, so do necessary copying */
382 leftover = card->blocklen - vblock->ofs;
383 if (vblock->ofs + len - index < card->blocklen) {
384 /* only a bit of this block to copy */
385 memcpy(buf + index,
386 pcache->buffer + vblock->ofs,
387 len - index);
388 index = len;
389 } else {
390 /* otherwise copy remainder of whole block */
391 memcpy(buf + index, pcache->buffer +
392 vblock->ofs, leftover);
393 index += leftover;
394 }
395 } else {
396 /*
397 * Not cached so read one byte -
398 * but cache the rest of the block
399 */
400 cx = vmu_flash_read_char(from + index, &retval, mtd);
401 if (retval) {
402 *retlen = index;
403 kfree(vblock);
404 return cx;
405 }
406 memset(buf + index, cx, 1);
407 index++;
408 }
409 kfree(vblock);
410 } while (len > index);
411 *retlen = index;
412
413 return 0;
414 }
415
vmu_flash_write(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)416 static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
417 size_t *retlen, const u_char *buf)
418 {
419 struct maple_device *mdev;
420 struct memcard *card;
421 struct mdev_part *mpart;
422 int index = 0, partition, error = 0, numblocks;
423 struct vmu_cache *pcache;
424 struct vmu_block *vblock;
425 unsigned char *buffer;
426
427 mpart = mtd->priv;
428 mdev = mpart->mdev;
429 partition = mpart->partition;
430 card = maple_get_drvdata(mdev);
431
432 numblocks = card->parts[partition].numblocks;
433 if (to + len > numblocks * card->blocklen)
434 len = numblocks * card->blocklen - to;
435 if (len == 0) {
436 error = -EIO;
437 goto failed;
438 }
439
440 vblock = ofs_to_block(to, mtd, partition);
441 if (!vblock) {
442 error = -ENOMEM;
443 goto failed;
444 }
445
446 buffer = kmalloc(card->blocklen, GFP_KERNEL);
447 if (!buffer) {
448 error = -ENOMEM;
449 goto fail_buffer;
450 }
451
452 do {
453 /* Read in the block we are to write to */
454 error = maple_vmu_read_block(vblock->num, buffer, mtd);
455 if (error)
456 goto fail_io;
457
458 do {
459 buffer[vblock->ofs] = buf[index];
460 vblock->ofs++;
461 index++;
462 if (index >= len)
463 break;
464 } while (vblock->ofs < card->blocklen);
465
466 /* write out new buffer */
467 error = maple_vmu_write_block(vblock->num, buffer, mtd);
468 /* invalidate the cache */
469 pcache = card->parts[partition].pcache;
470 pcache->valid = 0;
471
472 if (error != card->blocklen)
473 goto fail_io;
474
475 vblock->num++;
476 vblock->ofs = 0;
477 } while (len > index);
478
479 kfree(buffer);
480 *retlen = index;
481 kfree(vblock);
482 return 0;
483
484 fail_io:
485 kfree(buffer);
486 fail_buffer:
487 kfree(vblock);
488 failed:
489 dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
490 return error;
491 }
492
vmu_flash_sync(struct mtd_info * mtd)493 static void vmu_flash_sync(struct mtd_info *mtd)
494 {
495 /* Do nothing here */
496 }
497
/* Maple bus callback function to recursively query hardware details
 *
 * Fired once per partition with the GETMINFO reply in mq->recvbuf.
 * Fills in the partition's vmupart, registers its mtd device, then
 * queues a GETMINFO for the next partition (if any), which calls back
 * into this function again.
 */
static void vmu_queryblocks(struct mapleq *mq)
{
	struct maple_device *mdev;
	unsigned short *res;
	struct memcard *card;
	__be32 partnum;
	struct vmu_cache *pcache;
	struct mdev_part *mpart;
	struct mtd_info *mtd_cur;
	struct vmupart *part_cur;
	int error;

	mdev = mq->dev;
	card = maple_get_drvdata(mdev);
	/* View the reply as 16-bit words.
	 * NOTE(review): res[12] is taken as the user block count and
	 * res[6] as the root block number (see dev_info below); these
	 * offsets come from the GETMINFO reply layout - confirm against
	 * the maple protocol documentation */
	res = (unsigned short *) (mq->recvbuf->buf);
	card->tempA = res[12];
	card->tempB = res[6];

	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
		"blocks with a root block at %d\n", card->partition,
		card->tempA, card->tempB);

	part_cur = &card->parts[card->partition];
	part_cur->user_blocks = card->tempA;
	part_cur->root_block = card->tempB;
	/* Root block is the last block on the medium */
	part_cur->numblocks = card->tempB + 1;
	part_cur->name = kmalloc(12, GFP_KERNEL);
	if (!part_cur->name)
		goto fail_name;

	sprintf(part_cur->name, "vmu%d.%d.%d",
		mdev->port, mdev->unit, card->partition);
	mtd_cur = &card->mtd[card->partition];
	mtd_cur->name = part_cur->name;
	/* NOTE(review): magic type value 8 - presumably an MTD type id
	 * from mtd-abi.h; confirm which one was intended */
	mtd_cur->type = 8;
	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
	mtd_cur->size = part_cur->numblocks * card->blocklen;
	mtd_cur->erasesize = card->blocklen;
	mtd_cur->_write = vmu_flash_write;
	mtd_cur->_read = vmu_flash_read;
	mtd_cur->_sync = vmu_flash_sync;
	mtd_cur->writesize = card->blocklen;

	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
	if (!mpart)
		goto fail_mpart;

	mpart->mdev = mdev;
	mpart->partition = card->partition;
	mtd_cur->priv = mpart;
	mtd_cur->owner = THIS_MODULE;

	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
	if (!pcache)
		goto fail_cache_create;
	part_cur->pcache = pcache;

	error = mtd_device_register(mtd_cur, NULL, 0);
	if (error)
		goto fail_mtd_register;

	/* Clear the callback now this partition's query is done */
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);

	/*
	 * Set up a recursive call to the (probably theoretical)
	 * second or more partition
	 */
	if (++card->partition < card->partitions) {
		partnum = cpu_to_be32(card->partition << 24);
		maple_getcond_callback(mdev, vmu_queryblocks, 0,
			MAPLE_FUNC_MEMCARD);
		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
			MAPLE_COMMAND_GETMINFO, 2, &partnum);
	}
	return;

fail_mtd_register:
	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
		"error is 0x%X\n", mdev->port, mdev->unit, error);
	/* 'error' is reused as a loop index while unwinding all
	 * partitions set up so far */
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->parts)[error]).pcache);
		((card->parts)[error]).pcache = NULL;
	}
fail_cache_create:
fail_mpart:
	for (error = 0; error <= card->partition; error++) {
		kfree(((card->mtd)[error]).priv);
		((card->mtd)[error]).priv = NULL;
	}
	maple_getcond_callback(mdev, NULL, 0,
		MAPLE_FUNC_MEMCARD);
	kfree(part_cur->name);
fail_name:
	return;
}
595
596 /* Handles very basic info about the flash, queries for details */
vmu_connect(struct maple_device * mdev)597 static int vmu_connect(struct maple_device *mdev)
598 {
599 unsigned long test_flash_data, basic_flash_data;
600 int c, error;
601 struct memcard *card;
602 u32 partnum = 0;
603
604 test_flash_data = be32_to_cpu(mdev->devinfo.function);
605 /* Need to count how many bits are set - to find out which
606 * function_data element has details of the memory card
607 */
608 c = hweight_long(test_flash_data);
609
610 basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
611
612 card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
613 if (!card) {
614 error = -ENOMEM;
615 goto fail_nomem;
616 }
617
618 card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
619 card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
620 card->writecnt = basic_flash_data >> 12 & 0xF;
621 card->readcnt = basic_flash_data >> 8 & 0xF;
622 card->removable = basic_flash_data >> 7 & 1;
623
624 card->partition = 0;
625
626 /*
627 * Not sure there are actually any multi-partition devices in the
628 * real world, but the hardware supports them, so, so will we
629 */
630 card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
631 GFP_KERNEL);
632 if (!card->parts) {
633 error = -ENOMEM;
634 goto fail_partitions;
635 }
636
637 card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
638 GFP_KERNEL);
639 if (!card->mtd) {
640 error = -ENOMEM;
641 goto fail_mtd_info;
642 }
643
644 maple_set_drvdata(mdev, card);
645
646 /*
647 * We want to trap meminfo not get cond
648 * so set interval to zero, but rely on maple bus
649 * driver to pass back the results of the meminfo
650 */
651 maple_getcond_callback(mdev, vmu_queryblocks, 0,
652 MAPLE_FUNC_MEMCARD);
653
654 /* Make sure we are clear to go */
655 if (atomic_read(&mdev->busy) == 1) {
656 wait_event_interruptible_timeout(mdev->maple_wait,
657 atomic_read(&mdev->busy) == 0, HZ);
658 if (atomic_read(&mdev->busy) == 1) {
659 dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
660 mdev->port, mdev->unit);
661 error = -EAGAIN;
662 goto fail_device_busy;
663 }
664 }
665
666 atomic_set(&mdev->busy, 1);
667
668 /*
669 * Set up the minfo call: vmu_queryblocks will handle
670 * the information passed back
671 */
672 error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
673 MAPLE_COMMAND_GETMINFO, 2, &partnum);
674 if (error) {
675 dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
676 " error is 0x%X\n", mdev->port, mdev->unit, error);
677 goto fail_mtd_info;
678 }
679 return 0;
680
681 fail_device_busy:
682 kfree(card->mtd);
683 fail_mtd_info:
684 kfree(card->parts);
685 fail_partitions:
686 kfree(card);
687 fail_nomem:
688 return error;
689 }
690
vmu_disconnect(struct maple_device * mdev)691 static void vmu_disconnect(struct maple_device *mdev)
692 {
693 struct memcard *card;
694 struct mdev_part *mpart;
695 int x;
696
697 mdev->callback = NULL;
698 card = maple_get_drvdata(mdev);
699 for (x = 0; x < card->partitions; x++) {
700 mpart = ((card->mtd)[x]).priv;
701 mpart->mdev = NULL;
702 mtd_device_unregister(&((card->mtd)[x]));
703 kfree(((card->parts)[x]).name);
704 }
705 kfree(card->parts);
706 kfree(card->mtd);
707 kfree(card);
708 }
709
710 /* Callback to handle eccentricities of both mtd subsystem
711 * and general flakyness of Dreamcast VMUs
712 */
vmu_can_unload(struct maple_device * mdev)713 static int vmu_can_unload(struct maple_device *mdev)
714 {
715 struct memcard *card;
716 int x;
717 struct mtd_info *mtd;
718
719 card = maple_get_drvdata(mdev);
720 for (x = 0; x < card->partitions; x++) {
721 mtd = &((card->mtd)[x]);
722 if (kref_read(&mtd->refcnt))
723 return 0;
724 }
725 return 1;
726 }
727
728 #define ERRSTR "VMU at (%d, %d) file error -"
729
vmu_file_error(struct maple_device * mdev,void * recvbuf)730 static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
731 {
732 enum maple_file_errors error = ((int *)recvbuf)[1];
733
734 switch (error) {
735
736 case MAPLE_FILEERR_INVALID_PARTITION:
737 dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
738 mdev->port, mdev->unit);
739 break;
740
741 case MAPLE_FILEERR_PHASE_ERROR:
742 dev_notice(&mdev->dev, ERRSTR " phase error\n",
743 mdev->port, mdev->unit);
744 break;
745
746 case MAPLE_FILEERR_INVALID_BLOCK:
747 dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
748 mdev->port, mdev->unit);
749 break;
750
751 case MAPLE_FILEERR_WRITE_ERROR:
752 dev_notice(&mdev->dev, ERRSTR " write error\n",
753 mdev->port, mdev->unit);
754 break;
755
756 case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
757 dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
758 mdev->port, mdev->unit);
759 break;
760
761 case MAPLE_FILEERR_BAD_CRC:
762 dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
763 mdev->port, mdev->unit);
764 break;
765
766 default:
767 dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
768 mdev->port, mdev->unit, error);
769 }
770 }
771
772
probe_maple_vmu(struct device * dev)773 static int probe_maple_vmu(struct device *dev)
774 {
775 struct maple_device *mdev = to_maple_dev(dev);
776 struct maple_driver *mdrv = to_maple_driver(dev->driver);
777
778 mdev->can_unload = vmu_can_unload;
779 mdev->fileerr_handler = vmu_file_error;
780 mdev->driver = mdrv;
781
782 return vmu_connect(mdev);
783 }
784
/* Bus remove hook: tear down the card state */
static int remove_maple_vmu(struct device *dev)
{
	vmu_disconnect(to_maple_dev(dev));
	return 0;
}
792
/* Maple bus driver glue: matched on the MEMCARD function code */
static struct maple_driver vmu_flash_driver = {
	.function = MAPLE_FUNC_MEMCARD,
	.drv = {
		.name =		"Dreamcast_visual_memory",
		.probe =	probe_maple_vmu,
		.remove =	remove_maple_vmu,
	},
};
801
vmu_flash_map_init(void)802 static int __init vmu_flash_map_init(void)
803 {
804 return maple_driver_register(&vmu_flash_driver);
805 }
806
vmu_flash_map_exit(void)807 static void __exit vmu_flash_map_exit(void)
808 {
809 maple_driver_unregister(&vmu_flash_driver);
810 }
811
/* Module entry points and metadata */
module_init(vmu_flash_map_init);
module_exit(vmu_flash_map_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian McMenamin");
MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");
818