1 /*-
2 * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Synopsys DesignWare Mobile Storage Host Controller
33 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34 */
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/module.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/rman.h>
47 #include <sys/queue.h>
48 #include <sys/taskqueue.h>
49
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53
54 #include <dev/fdt/fdt_common.h>
55 #include <dev/ofw/openfirm.h>
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58
59 #include <machine/bus.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
62
63 #include <dev/clk/clk.h>
64
65 #include <dev/mmc/host/dwmmc_reg.h>
66 #include <dev/mmc/host/dwmmc_var.h>
67
68 #include "opt_mmccam.h"
69
70 #ifdef MMCCAM
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_debug.h>
74 #include <cam/cam_sim.h>
75 #include <cam/cam_xpt_sim.h>
76
77 #include "mmc_sim_if.h"
78 #endif
79
80 #include "mmcbr_if.h"
81
82 #ifdef DEBUG
83 #define dprintf(fmt, args...) printf(fmt, ##args)
84 #else
#define dprintf(fmt, args...)
86 #endif
87
88 #define READ4(_sc, _reg) \
89 bus_read_4((_sc)->res[0], _reg)
90 #define WRITE4(_sc, _reg, _val) \
91 bus_write_4((_sc)->res[0], _reg, _val)
92
93 #define DIV_ROUND_UP(n, d) howmany(n, d)
94
95 #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
96 #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
97 #define DWMMC_LOCK_INIT(_sc) \
98 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
99 "dwmmc", MTX_DEF)
100 #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
101 #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
102 #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
103
104 #define PENDING_CMD 0x01
105 #define PENDING_STOP 0x02
106 #define CARD_INIT_DONE 0x04
107
108 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
109 |SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
110 #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
111 |SDMMC_INTMASK_RE)
112 #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
113 |SDMMC_INTMASK_HLE)
114
115 #define DES0_DIC (1 << 1) /* Disable Interrupt on Completion */
116 #define DES0_LD (1 << 2) /* Last Descriptor */
117 #define DES0_FS (1 << 3) /* First Descriptor */
118 #define DES0_CH (1 << 4) /* second address CHained */
119 #define DES0_ER (1 << 5) /* End of Ring */
120 #define DES0_CES (1 << 30) /* Card Error Summary */
121 #define DES0_OWN (1 << 31) /* OWN */
122
123 #define DES1_BS1_MASK 0x1fff
124
125 struct idmac_desc {
126 uint32_t des0; /* control */
127 uint32_t des1; /* bufsize */
128 uint32_t des2; /* buf1 phys addr */
129 uint32_t des3; /* buf2 phys addr or next descr */
130 };
131
132 #define IDMAC_DESC_SEGS (PAGE_SIZE / (sizeof(struct idmac_desc)))
133 #define IDMAC_DESC_SIZE (sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
134 #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */
/*
 * The size field in a DMA descriptor is 13 bits long (up to 8191 bytes),
 * but the size must be a multiple of the data bus width. Additionally, we
 * must ensure that bus_dmamap_load() doesn't further fragment the buffer
 * (because it is processed with page-size granularity). Thus limit the
 * fragment size to half of a page.
 * XXX switch the descriptor format to an array and use the second buffer
 * pointer for the second half of the page.
 */
144 #define IDMAC_MAX_SIZE 2048
/*
 * Busdma may bounce buffers, so we must reserve 2 descriptors
 * (one at the start and one at the end) for bounced fragments.
 */
#define DWMMC_MAX_DATA ((IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE)
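/*
 * Worked example (assuming 4 KB pages and a 512-byte MMC_SECTOR_SIZE):
 * IDMAC_DESC_SEGS = 4096 / 16 = 256 descriptors, so
 * DWMMC_MAX_DATA = 2048 * (256 - 2) / 512 = 1016 sectors (~508 KiB) per request.
 */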
150
151 static void dwmmc_next_operation(struct dwmmc_softc *);
152 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
153 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
154 static int dma_stop(struct dwmmc_softc *);
155 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
156 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
157 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
158
159 static struct resource_spec dwmmc_spec[] = {
160 { SYS_RES_MEMORY, 0, RF_ACTIVE },
161 { SYS_RES_IRQ, 0, RF_ACTIVE },
162 { -1, 0 }
163 };
164
165 #define HWTYPE_MASK (0x0000ffff)
166 #define HWFLAG_MASK (0xffff << 16)
167
168 static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
170 {
171
172 if (nsegs != 1)
173 panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
174 if (error != 0)
175 panic("%s: error != 0 (%d)\n", __func__, error);
176
177 *(bus_addr_t *)arg = segs[0].ds_addr;
178 }
179
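/*
 * bus_dmamap_load() callback: translate the busdma segment list into the
 * IDMAC descriptor chain. The first segment is marked First (FS), the last
 * one Last (LD) with interrupt-on-completion enabled, and ownership (OWN)
 * is handed to the controller only after the rest of the descriptor is
 * visible (hence the wmb() before setting DES0_OWN).
 */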
180 static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
182 {
183 struct dwmmc_softc *sc;
184 int idx;
185
186 sc = arg;
187 dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
188 if (error != 0)
189 panic("%s: error != 0 (%d)\n", __func__, error);
190
191 for (idx = 0; idx < nsegs; idx++) {
192 sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
193 sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
194 sc->desc_ring[idx].des2 = segs[idx].ds_addr;
195
196 if (idx == 0)
197 sc->desc_ring[idx].des0 |= DES0_FS;
198
199 if (idx == (nsegs - 1)) {
200 sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
201 sc->desc_ring[idx].des0 |= DES0_LD;
202 }
203 wmb();
204 sc->desc_ring[idx].des0 |= DES0_OWN;
205 }
206 }
207
208 static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
210 {
211 int reg;
212 int i;
213
214 reg = READ4(sc, SDMMC_CTRL);
215 reg |= (reset_bits);
216 WRITE4(sc, SDMMC_CTRL, reg);
217
/* Wait until the reset completes */
219 for (i = 0; i < 100; i++) {
220 if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
221 return (0);
222 DELAY(10);
223 }
224
225 device_printf(sc->dev, "Reset failed\n");
226
227 return (1);
228 }
229
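/*
 * Allocate the IDMAC descriptor ring (one page, physically contiguous and
 * below 4 GB), link the descriptors into a circular chain via des3, and
 * create the busdma tag/map used to load data buffers for DMA transfers.
 */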
230 static int
dma_setup(struct dwmmc_softc *sc)
232 {
233 int error;
234 int nidx;
235 int idx;
236
/*
 * Set up the descriptor ring, descriptors, and DMA maps.
 */
240 error = bus_dma_tag_create(
241 bus_get_dma_tag(sc->dev), /* Parent tag. */
242 4096, 0, /* alignment, boundary */
243 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
244 BUS_SPACE_MAXADDR, /* highaddr */
245 NULL, NULL, /* filter, filterarg */
246 IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */
247 IDMAC_DESC_SIZE, /* maxsegsize */
248 0, /* flags */
249 NULL, NULL, /* lockfunc, lockarg */
250 &sc->desc_tag);
251 if (error != 0) {
252 device_printf(sc->dev,
253 "could not create ring DMA tag.\n");
254 return (1);
255 }
256
257 error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
258 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
259 &sc->desc_map);
260 if (error != 0) {
261 device_printf(sc->dev,
262 "could not allocate descriptor ring.\n");
263 return (1);
264 }
265
266 error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
267 sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
268 &sc->desc_ring_paddr, 0);
269 if (error != 0) {
270 device_printf(sc->dev,
271 "could not load descriptor ring map.\n");
272 return (1);
273 }
274
275 for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
276 sc->desc_ring[idx].des0 = DES0_CH;
277 sc->desc_ring[idx].des1 = 0;
278 nidx = (idx + 1) % IDMAC_DESC_SEGS;
279 sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
280 (nidx * sizeof(struct idmac_desc));
281 }
282 sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
283 sc->desc_ring[idx - 1].des0 |= DES0_ER;
284
285 error = bus_dma_tag_create(
286 bus_get_dma_tag(sc->dev), /* Parent tag. */
287 8, 0, /* alignment, boundary */
288 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
289 BUS_SPACE_MAXADDR, /* highaddr */
290 NULL, NULL, /* filter, filterarg */
291 IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */
292 IDMAC_DESC_SEGS, /* nsegments */
293 IDMAC_MAX_SIZE, /* maxsegsize */
294 0, /* flags */
295 NULL, NULL, /* lockfunc, lockarg */
296 &sc->buf_tag);
297 if (error != 0) {
298 device_printf(sc->dev,
299 "could not create ring DMA tag.\n");
300 return (1);
301 }
302
303 error = bus_dmamap_create(sc->buf_tag, 0,
304 &sc->buf_map);
305 if (error != 0) {
306 device_printf(sc->dev,
307 "could not create TX buffer DMA map.\n");
308 return (1);
309 }
310
311 return (0);
312 }
313
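/*
 * Copy the command response out of the RESP0..RESP3 registers. For 136-bit
 * responses the words are stored most-significant-first in cmd->resp[], so
 * RESP3 ends up in resp[0].
 */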
314 static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
316 {
317 struct mmc_command *cmd;
318
319 DWMMC_ASSERT_LOCKED(sc);
320
321 cmd = sc->curcmd;
322 KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
323
324 if (cmd->flags & MMC_RSP_PRESENT) {
325 if (cmd->flags & MMC_RSP_136) {
326 cmd->resp[3] = READ4(sc, SDMMC_RESP0);
327 cmd->resp[2] = READ4(sc, SDMMC_RESP1);
328 cmd->resp[1] = READ4(sc, SDMMC_RESP2);
329 cmd->resp[0] = READ4(sc, SDMMC_RESP3);
330 } else {
331 cmd->resp[3] = 0;
332 cmd->resp[2] = 0;
333 cmd->resp[1] = 0;
334 cmd->resp[0] = READ4(sc, SDMMC_RESP0);
335 }
336 }
337 }
338
339 static void
dwmmc_tasklet(struct dwmmc_softc *sc)
341 {
342 struct mmc_command *cmd;
343
344 DWMMC_ASSERT_LOCKED(sc);
345
346 cmd = sc->curcmd;
347 KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
348
349 if (!sc->cmd_done)
350 return;
351
352 if (cmd->error != MMC_ERR_NONE || !cmd->data) {
353 dwmmc_next_operation(sc);
354
355 } else if (cmd->data && sc->dto_rcvd) {
356 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
357 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
358 sc->use_auto_stop) {
359 if (sc->acd_rcvd)
360 dwmmc_next_operation(sc);
361 } else {
362 dwmmc_next_operation(sc);
363 }
364 }
365 }
366
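/*
 * Interrupt handler. Controller events are read from MINTSTS (command/data
 * errors, command done, auto-stop done, data transfer over, card detect)
 * and acknowledged via RINTSTS; in DMA mode the internal DMAC status
 * (IDSTS) is handled as well. dwmmc_tasklet() then decides whether the
 * current request can be advanced or completed.
 */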
367 static void
dwmmc_intr(void *arg)
369 {
370 struct mmc_command *cmd;
371 struct dwmmc_softc *sc;
372 uint32_t reg;
373
374 sc = arg;
375
376 DWMMC_LOCK(sc);
377
378 cmd = sc->curcmd;
379 KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
380
381 /* First handle SDMMC controller interrupts */
382 reg = READ4(sc, SDMMC_MINTSTS);
383 if (reg) {
384 dprintf("%s 0x%08x\n", __func__, reg);
385
386 if (reg & DWMMC_CMD_ERR_FLAGS) {
387 dprintf("cmd err 0x%08x cmd 0x%08x\n",
388 reg, cmd->opcode);
389 cmd->error = MMC_ERR_TIMEOUT;
390 }
391
392 if (reg & DWMMC_DATA_ERR_FLAGS) {
393 dprintf("data err 0x%08x cmd 0x%08x\n",
394 reg, cmd->opcode);
395 cmd->error = MMC_ERR_FAILED;
396 if (!sc->use_pio) {
397 dma_done(sc, cmd);
398 dma_stop(sc);
399 }
400 }
401
402 if (reg & SDMMC_INTMASK_CMD_DONE) {
403 dwmmc_cmd_done(sc);
404 sc->cmd_done = 1;
405 }
406
407 if (reg & SDMMC_INTMASK_ACD)
408 sc->acd_rcvd = 1;
409
410 if (reg & SDMMC_INTMASK_DTO)
411 sc->dto_rcvd = 1;
412
413 if (reg & SDMMC_INTMASK_CD) {
dwmmc_handle_card_present(sc,
    READ4(sc, SDMMC_CDETECT) == 0);
416 }
417 }
418
419 /* Ack interrupts */
420 WRITE4(sc, SDMMC_RINTSTS, reg);
421
422 if (sc->use_pio) {
423 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
424 pio_read(sc, cmd);
425 }
426 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
427 pio_write(sc, cmd);
428 }
429 } else {
430 /* Now handle DMA interrupts */
431 reg = READ4(sc, SDMMC_IDSTS);
432 if (reg) {
433 dprintf("dma intr 0x%08x\n", reg);
434 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
435 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
436 SDMMC_IDINTEN_RI));
437 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
438 dma_done(sc, cmd);
439 }
440 }
441 }
442
443 dwmmc_tasklet(sc);
444
445 DWMMC_UNLOCK(sc);
446 }
447
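/*
 * React to a card-detect change: schedule the card task with a short delay
 * (hz / 2) on insertion, presumably to let the card seat/debounce, and run
 * it immediately on removal. sc->child doubles as the "card present" state.
 */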
448 static void
dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
450 {
451 bool was_present;
452
453 if (dumping || SCHEDULER_STOPPED())
454 return;
455
456 was_present = sc->child != NULL;
457
458 if (!was_present && is_present) {
459 taskqueue_enqueue_timeout(taskqueue_bus,
460 &sc->card_delayed_task, -(hz / 2));
461 } else if (was_present && !is_present) {
462 taskqueue_enqueue(taskqueue_bus, &sc->card_task);
463 }
464 }
465
466 static void
dwmmc_card_task(void *arg, int pending __unused)
468 {
469 struct dwmmc_softc *sc = arg;
470
471 #ifdef MMCCAM
472 mmc_cam_sim_discover(&sc->mmc_sim);
473 #else
474 bus_topo_lock();
475 if (READ4(sc, SDMMC_CDETECT) == 0 ||
476 (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
477 if (sc->child == NULL) {
478 if (bootverbose)
479 device_printf(sc->dev, "Card inserted\n");
480
481 sc->child = device_add_child(sc->dev, "mmc", DEVICE_UNIT_ANY);
482 if (sc->child) {
483 device_set_ivars(sc->child, sc);
484 (void)device_probe_and_attach(sc->child);
485 }
486 }
487 } else {
488 /* Card isn't present, detach if necessary */
489 if (sc->child != NULL) {
490 if (bootverbose)
491 device_printf(sc->dev, "Card removed\n");
492
493 device_delete_child(sc->dev, sc->child);
494 sc->child = NULL;
495 }
496 }
497 bus_topo_unlock();
498 #endif /* MMCCAM */
499 }
500
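/*
 * Illustrative FDT fragment (hypothetical node, address and clock/regulator
 * labels, shown only to document the properties consumed below; generic
 * properties such as bus-width and cd-gpios are handled by mmc_fdt_parse()):
 *
 *	mmc0: mmc@ff704000 {
 *		compatible = "vendor,dw-mshc";	// matched by the SoC front end
 *		fifo-depth = <0x400>;
 *		clock-frequency = <50000000>;
 *		clocks = <&biu_clk>, <&ciu_clk>;
 *		clock-names = "biu", "ciu";
 *		resets = <&rst 0>;
 *		reset-names = "reset";
 *		vmmc-supply = <&vcc_sd>;
 *		vqmmc-supply = <&vccio_sd>;
 *	};
 */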
501 static int
parse_fdt(struct dwmmc_softc *sc)
503 {
504 pcell_t dts_value[3];
505 phandle_t node;
506 uint32_t bus_hz = 0;
507 int len;
508 int error;
509
510 if ((node = ofw_bus_get_node(sc->dev)) == -1)
511 return (ENXIO);
512
513 /* Set some defaults for freq and supported mode */
514 sc->host.f_min = 400000;
515 sc->host.f_max = 200000000;
516 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
517 sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
518 mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
519
520 /* fifo-depth */
521 if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
522 OF_getencprop(node, "fifo-depth", dts_value, len);
523 sc->fifo_depth = dts_value[0];
524 }
525
526 /* num-slots (Deprecated) */
527 sc->num_slots = 1;
528 if ((len = OF_getproplen(node, "num-slots")) > 0) {
529 device_printf(sc->dev, "num-slots property is deprecated\n");
530 OF_getencprop(node, "num-slots", dts_value, len);
531 sc->num_slots = dts_value[0];
532 }
533
534 /* clock-frequency */
535 if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
536 OF_getencprop(node, "clock-frequency", dts_value, len);
537 bus_hz = dts_value[0];
538 }
539
540 /* IP block reset is optional */
541 error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
542 if (error != 0 &&
543 error != ENOENT &&
544 error != ENODEV) {
545 device_printf(sc->dev, "Cannot get reset\n");
546 goto fail;
547 }
548
549 /* vmmc regulator is optional */
550 error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
551 &sc->vmmc);
552 if (error != 0 &&
553 error != ENOENT &&
554 error != ENODEV) {
555 device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
556 goto fail;
557 }
558
559 /* vqmmc regulator is optional */
560 error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
561 &sc->vqmmc);
562 if (error != 0 &&
563 error != ENOENT &&
564 error != ENODEV) {
565 device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
566 goto fail;
567 }
568
569 /* Assert reset first */
570 if (sc->hwreset != NULL) {
571 error = hwreset_assert(sc->hwreset);
572 if (error != 0) {
573 device_printf(sc->dev, "Cannot assert reset\n");
574 goto fail;
575 }
576 }
577
578 /* BIU (Bus Interface Unit clock) is optional */
579 error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
580 if (error != 0 &&
581 error != ENOENT &&
582 error != ENODEV) {
583 device_printf(sc->dev, "Cannot get 'biu' clock\n");
584 goto fail;
585 }
586
587 if (sc->biu) {
588 error = clk_enable(sc->biu);
589 if (error != 0) {
590 device_printf(sc->dev, "cannot enable biu clock\n");
591 goto fail;
592 }
593 }
594
595 /*
596 * CIU (Controller Interface Unit clock) is mandatory
597 * if no clock-frequency property is given
598 */
599 error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
600 if (error != 0 &&
601 error != ENOENT &&
602 error != ENODEV) {
603 device_printf(sc->dev, "Cannot get 'ciu' clock\n");
604 goto fail;
605 }
606
607 if (sc->ciu) {
608 if (bus_hz != 0) {
609 error = clk_set_freq(sc->ciu, bus_hz, 0);
610 if (error != 0)
611 device_printf(sc->dev,
612 "cannot set ciu clock to %u\n", bus_hz);
613 }
614 error = clk_enable(sc->ciu);
615 if (error != 0) {
616 device_printf(sc->dev, "cannot enable ciu clock\n");
617 goto fail;
618 }
619 clk_get_freq(sc->ciu, &sc->bus_hz);
620 }
621
622 /* Enable regulators */
623 if (sc->vmmc != NULL) {
624 error = regulator_enable(sc->vmmc);
625 if (error != 0) {
626 device_printf(sc->dev, "Cannot enable vmmc regulator\n");
627 goto fail;
628 }
629 }
630 if (sc->vqmmc != NULL) {
631 error = regulator_enable(sc->vqmmc);
632 if (error != 0) {
633 device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
634 goto fail;
635 }
636 }
637
638 /* Take dwmmc out of reset */
639 if (sc->hwreset != NULL) {
640 error = hwreset_deassert(sc->hwreset);
641 if (error != 0) {
642 device_printf(sc->dev, "Cannot deassert reset\n");
643 goto fail;
644 }
645 }
646
647 if (sc->bus_hz == 0) {
648 device_printf(sc->dev, "No bus speed provided\n");
649 goto fail;
650 }
651
652 return (0);
653
654 fail:
655 return (ENXIO);
656 }
657
658 int
dwmmc_attach(device_t dev)
660 {
661 struct dwmmc_softc *sc;
662 int error;
663
664 sc = device_get_softc(dev);
665
666 sc->dev = dev;
667
/* Why not use Auto Stop? It saves hundreds of IRQs per second. */
669 sc->use_auto_stop = 1;
670
671 error = parse_fdt(sc);
672 if (error != 0) {
673 device_printf(dev, "Can't get FDT property.\n");
674 return (ENXIO);
675 }
676
677 DWMMC_LOCK_INIT(sc);
678
679 if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
680 device_printf(dev, "could not allocate resources\n");
681 return (ENXIO);
682 }
683
684 /* Setup interrupt handler. */
685 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
686 NULL, dwmmc_intr, sc, &sc->intr_cookie);
687 if (error != 0) {
688 device_printf(dev, "could not setup interrupt handler.\n");
689 return (ENXIO);
690 }
691
692 device_printf(dev, "Hardware version ID is %04x\n",
693 READ4(sc, SDMMC_VERID) & 0xffff);
694
695 /* Reset all */
696 if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
697 SDMMC_CTRL_FIFO_RESET |
698 SDMMC_CTRL_DMA_RESET)))
699 return (ENXIO);
700
701 dwmmc_setup_bus(sc, sc->host.f_min);
702
703 if (sc->fifo_depth == 0) {
704 sc->fifo_depth = 1 +
705 ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
706 device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
707 sc->fifo_depth);
708 }
709
710 if (!sc->use_pio) {
711 dma_stop(sc);
712 if (dma_setup(sc))
713 return (ENXIO);
714
715 /* Install desc base */
716 WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
717
718 /* Enable DMA interrupts */
719 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
720 WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
721 SDMMC_IDINTEN_RI |
722 SDMMC_IDINTEN_TI));
723 }
724
/* Clear and disable interrupts for now */
726 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
727 WRITE4(sc, SDMMC_INTMASK, 0);
728
729 /* Maximum timeout */
730 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
731
732 /* Enable interrupts */
733 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
734 WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
735 SDMMC_INTMASK_DTO |
736 SDMMC_INTMASK_ACD |
737 SDMMC_INTMASK_TXDR |
738 SDMMC_INTMASK_RXDR |
739 DWMMC_ERR_FLAGS |
740 SDMMC_INTMASK_CD));
741 WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
742
743 TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
744 TIMEOUT_TASK_INIT(taskqueue_bus, &sc->card_delayed_task, 0,
745 dwmmc_card_task, sc);
746
747 #ifdef MMCCAM
748 sc->ccb = NULL;
749 if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
750 device_printf(dev, "cannot alloc cam sim\n");
751 dwmmc_detach(dev);
752 return (ENXIO);
753 }
754 #endif
/*
 * Schedule a card detection pass, as we won't get an interrupt
 * if the card is already inserted when we attach.
 */
759 dwmmc_card_task(sc, 0);
760 return (0);
761 }
762
763 int
dwmmc_detach(device_t dev)
765 {
766 struct dwmmc_softc *sc;
767 int ret;
768
769 sc = device_get_softc(dev);
770
771 ret = bus_generic_detach(dev);
772 if (ret != 0)
773 return (ret);
774
775 taskqueue_drain(taskqueue_bus, &sc->card_task);
776 taskqueue_drain_timeout(taskqueue_bus, &sc->card_delayed_task);
777
778 if (sc->intr_cookie != NULL) {
779 ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
780 if (ret != 0)
781 return (ret);
782 }
783 bus_release_resources(dev, dwmmc_spec, sc->res);
784
785 DWMMC_LOCK_DESTROY(sc);
786
787 if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
788 device_printf(sc->dev, "cannot deassert reset\n");
789 if (sc->biu != NULL && clk_disable(sc->biu) != 0)
790 device_printf(sc->dev, "cannot disable biu clock\n");
791 if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
792 device_printf(sc->dev, "cannot disable ciu clock\n");
793
794 if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
795 device_printf(sc->dev, "Cannot disable vmmc regulator\n");
796 if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
797 device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
798
799 #ifdef MMCCAM
800 mmc_cam_sim_free(&sc->mmc_sim);
801 #endif
802
803 return (0);
804 }
805
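/*
 * Program the card clock. The controller requires the clock-update
 * handshake: disable CLKENA, write CLKDIV, issue a command with
 * SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START and poll until the START bit
 * clears, then re-enable the clock the same way. The divider is
 * CLKDIV = howmany(bus_hz, 2 * freq), and the resulting card clock is
 * roughly bus_hz / (2 * CLKDIV); e.g. with a 50 MHz CIU clock and a
 * 400 kHz target, CLKDIV = 63 and the card runs at ~397 kHz.
 */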
806 static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
808 {
809 int tout;
810 int div;
811
812 if (freq == 0) {
813 WRITE4(sc, SDMMC_CLKENA, 0);
814 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
815 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
816
817 tout = 1000;
818 do {
819 if (tout-- < 0) {
820 device_printf(sc->dev, "Failed update clk\n");
821 return (1);
822 }
823 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
824
825 return (0);
826 }
827
828 WRITE4(sc, SDMMC_CLKENA, 0);
829 WRITE4(sc, SDMMC_CLKSRC, 0);
830
831 div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
832
833 WRITE4(sc, SDMMC_CLKDIV, div);
834 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
835 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
836
837 tout = 1000;
838 do {
839 if (tout-- < 0) {
840 device_printf(sc->dev, "Failed to update clk\n");
841 return (1);
842 }
843 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
844
845 WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
846 WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
847 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
848
849 tout = 1000;
850 do {
851 if (tout-- < 0) {
852 device_printf(sc->dev, "Failed to enable clk\n");
853 return (1);
854 }
855 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
856
857 return (0);
858 }
859
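/*
 * mmcbr update_ios: apply card power (PWREN plus the FDT regulator helper),
 * bus width (CTYPE), the DDR bit in UHS_REG for DDR50/DDR52/HS400 timings,
 * any platform-specific hook (sc->update_ios), and finally the card clock.
 */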
860 static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
862 {
863 struct dwmmc_softc *sc;
864 struct mmc_ios *ios;
865 uint32_t reg;
866 int ret = 0;
867
868 sc = device_get_softc(brdev);
869 ios = &sc->host.ios;
870
871 dprintf("Setting up clk %u bus_width %d, timing: %d\n",
872 ios->clock, ios->bus_width, ios->timing);
873
874 switch (ios->power_mode) {
875 case power_on:
876 break;
877 case power_off:
878 WRITE4(sc, SDMMC_PWREN, 0);
879 break;
880 case power_up:
881 WRITE4(sc, SDMMC_PWREN, 1);
882 break;
883 }
884
885 mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
886
887 if (ios->bus_width == bus_width_8)
888 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
889 else if (ios->bus_width == bus_width_4)
890 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
891 else
892 WRITE4(sc, SDMMC_CTYPE, 0);
893
894 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
895 /* XXX: take care about DDR or SDR use here */
896 WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
897 }
898
899 /* Set DDR mode */
900 reg = READ4(sc, SDMMC_UHS_REG);
901 if (ios->timing == bus_timing_uhs_ddr50 ||
902 ios->timing == bus_timing_mmc_ddr52 ||
903 ios->timing == bus_timing_mmc_hs400)
904 reg |= (SDMMC_UHS_REG_DDR);
905 else
906 reg &= ~(SDMMC_UHS_REG_DDR);
907 WRITE4(sc, SDMMC_UHS_REG, reg);
908
909 if (sc->update_ios)
910 ret = sc->update_ios(sc, ios);
911
912 dwmmc_setup_bus(sc, ios->clock);
913
914 return (ret);
915 }
916
917 static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
919 {
920 struct mmc_data *data;
921
922 data = cmd->data;
923
924 if (data->flags & MMC_DATA_WRITE)
925 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
926 BUS_DMASYNC_POSTWRITE);
927 else
928 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
929 BUS_DMASYNC_POSTREAD);
930
931 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
932 BUS_DMASYNC_POSTWRITE);
933
934 bus_dmamap_unload(sc->buf_tag, sc->buf_map);
935
936 return (0);
937 }
938
939 static int
dma_stop(struct dwmmc_softc *sc)
941 {
942 int reg;
943
944 reg = READ4(sc, SDMMC_CTRL);
945 reg &= ~(SDMMC_CTRL_USE_IDMAC);
946 reg |= (SDMMC_CTRL_DMA_RESET);
947 WRITE4(sc, SDMMC_CTRL, reg);
948
949 reg = READ4(sc, SDMMC_BMOD);
950 reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
951 reg |= (SDMMC_BMOD_SWR);
952 WRITE4(sc, SDMMC_BMOD, reg);
953
954 return (0);
955 }
956
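/*
 * Prepare a DMA transfer for the current command: mask the PIO FIFO
 * interrupts, load the data buffer through dwmmc_ring_setup(), sync the
 * maps, program the FIFO watermarks, enable the internal DMAC (CTRL and
 * BMOD), and poke the poll-demand register to start descriptor fetching.
 */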
957 static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
959 {
960 struct mmc_data *data;
961 int err;
962 int reg;
963
964 data = cmd->data;
965
966 reg = READ4(sc, SDMMC_INTMASK);
967 reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
968 WRITE4(sc, SDMMC_INTMASK, reg);
969 dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
970 err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
971 data->data, data->len, dwmmc_ring_setup,
972 sc, BUS_DMA_NOWAIT);
973 if (err != 0)
974 panic("dmamap_load failed\n");
975
976 /* Ensure the device can see the desc */
977 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
978 BUS_DMASYNC_PREWRITE);
979
980 if (data->flags & MMC_DATA_WRITE)
981 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
982 BUS_DMASYNC_PREWRITE);
983 else
984 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
985 BUS_DMASYNC_PREREAD);
986
987 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
988 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
989 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
990
991 WRITE4(sc, SDMMC_FIFOTH, reg);
992 wmb();
993
994 reg = READ4(sc, SDMMC_CTRL);
995 reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
996 WRITE4(sc, SDMMC_CTRL, reg);
997 wmb();
998
999 reg = READ4(sc, SDMMC_BMOD);
1000 reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1001 WRITE4(sc, SDMMC_BMOD, reg);
1002
1003 /* Start */
1004 WRITE4(sc, SDMMC_PLDMND, 1);
1005
1006 return (0);
1007 }
1008
1009 static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1011 {
1012 struct mmc_data *data;
1013 int reg;
1014
1015 data = cmd->data;
1016 data->xfer_len = 0;
1017
1018 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1019 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1020 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1021
1022 WRITE4(sc, SDMMC_FIFOTH, reg);
1023 wmb();
1024
1025 return (0);
1026 }
1027
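/*
 * PIO receive path: drain 32-bit words from the data FIFO into the request
 * buffer until the FIFO is empty or the transfer length is reached; called
 * from the interrupt handler on RXDR/DTO.
 */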
1028 static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1030 {
1031 struct mmc_data *data;
1032 uint32_t *p, status;
1033
1034 if (cmd == NULL || cmd->data == NULL)
1035 return;
1036
1037 data = cmd->data;
1038 if ((data->flags & MMC_DATA_READ) == 0)
1039 return;
1040
1041 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1042 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1043
1044 while (data->xfer_len < data->len) {
1045 status = READ4(sc, SDMMC_STATUS);
1046 if (status & SDMMC_STATUS_FIFO_EMPTY)
1047 break;
1048 *p++ = READ4(sc, SDMMC_DATA);
1049 data->xfer_len += 4;
1050 }
1051
1052 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1053 }
1054
1055 static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1057 {
1058 struct mmc_data *data;
1059 uint32_t *p, status;
1060
1061 if (cmd == NULL || cmd->data == NULL)
1062 return;
1063
1064 data = cmd->data;
1065 if ((data->flags & MMC_DATA_WRITE) == 0)
1066 return;
1067
1068 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1069 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1070
1071 while (data->xfer_len < data->len) {
1072 status = READ4(sc, SDMMC_STATUS);
1073 if (status & SDMMC_STATUS_FIFO_FULL)
1074 break;
1075 WRITE4(sc, SDMMC_DATA, *p++);
1076 data->xfer_len += 4;
1077 }
1078
1079 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1080 }
1081
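/*
 * Translate an mmc_command into a CMD register write: set stop/abort or
 * wait-for-data flags, response length/CRC handling, the one-time
 * SEND_INIT sequence, and (for data commands) auto-stop, byte count and
 * block size, then kick off either PIO or DMA before starting the command.
 */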
1082 static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1084 {
1085 struct mmc_data *data;
1086 uint32_t blksz;
1087 uint32_t cmdr;
1088
1089 dprintf("%s\n", __func__);
1090
1091 DWMMC_ASSERT_LOCKED(sc);
1092
1093 sc->curcmd = cmd;
1094 data = cmd->data;
1095
1096 #ifndef MMCCAM
1097 /* XXX Upper layers don't always set this */
1098 cmd->mrq = sc->req;
1099 #endif
1100 /* Begin setting up command register. */
1101
1102 cmdr = cmd->opcode;
1103
1104 dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1105
1106 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1107 cmd->opcode == MMC_GO_IDLE_STATE ||
1108 cmd->opcode == MMC_GO_INACTIVE_STATE)
1109 cmdr |= SDMMC_CMD_STOP_ABORT;
1110 else if (cmd->opcode != MMC_SEND_STATUS && data)
1111 cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1112
1113 /* Set up response handling. */
1114 if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1115 cmdr |= SDMMC_CMD_RESP_EXP;
1116 if (cmd->flags & MMC_RSP_136)
1117 cmdr |= SDMMC_CMD_RESP_LONG;
1118 }
1119
1120 if (cmd->flags & MMC_RSP_CRC)
1121 cmdr |= SDMMC_CMD_RESP_CRC;
1122
1123 /*
1124 * XXX: Not all platforms want this.
1125 */
1126 cmdr |= SDMMC_CMD_USE_HOLD_REG;
1127
1128 if ((sc->flags & CARD_INIT_DONE) == 0) {
1129 sc->flags |= (CARD_INIT_DONE);
1130 cmdr |= SDMMC_CMD_SEND_INIT;
1131 }
1132
1133 if (data) {
1134 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1135 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1136 sc->use_auto_stop)
1137 cmdr |= SDMMC_CMD_SEND_ASTOP;
1138
1139 cmdr |= SDMMC_CMD_DATA_EXP;
1140 if (data->flags & MMC_DATA_STREAM)
1141 cmdr |= SDMMC_CMD_MODE_STREAM;
1142 if (data->flags & MMC_DATA_WRITE)
1143 cmdr |= SDMMC_CMD_DATA_WRITE;
1144
1145 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1146 #ifdef MMCCAM
1147 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1148 WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1149 WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1150 } else
1151 #endif
1152 {
1153 WRITE4(sc, SDMMC_BYTCNT, data->len);
1154 blksz = (data->len < MMC_SECTOR_SIZE) ? \
1155 data->len : MMC_SECTOR_SIZE;
1156 WRITE4(sc, SDMMC_BLKSIZ, blksz);
1157 }
1158
1159 if (sc->use_pio) {
1160 pio_prepare(sc, cmd);
1161 } else {
1162 dma_prepare(sc, cmd);
1163 }
1164 wmb();
1165 }
1166
1167 dprintf("cmdr 0x%08x\n", cmdr);
1168
1169 WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1170 wmb();
1171 WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}
1173
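/*
 * Advance the request state machine: wait for the card to go idle, issue a
 * pending command if there is one, and otherwise complete the request back
 * to the MMC stack (req->done()) or to CAM (xpt_done()).
 */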
1174 static void
dwmmc_next_operation(struct dwmmc_softc *sc)
1176 {
1177 #ifdef MMCCAM
1178 union ccb *ccb;
1179 #else
1180 struct mmc_request *req;
1181 #endif
1182 struct mmc_command *cmd;
1183
1184 dprintf("%s\n", __func__);
1185 DWMMC_ASSERT_LOCKED(sc);
1186
1187 #ifdef MMCCAM
1188 ccb = sc->ccb;
1189 if (ccb == NULL)
1190 return;
1191 cmd = &ccb->mmcio.cmd;
1192 #else
1193 req = sc->req;
1194 if (req == NULL)
1195 return;
1196 cmd = req->cmd;
1197 #endif
1198
1199 sc->acd_rcvd = 0;
1200 sc->dto_rcvd = 0;
1201 sc->cmd_done = 0;
1202
/*
 * XXX: Wait while the card is still busy.
 * We need this to prevent data timeouts, mostly caused by a
 * multi-block write command followed by a single-block read.
 */
1209 while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1210 continue;
1211
1212 if (sc->flags & PENDING_CMD) {
1213 sc->flags &= ~PENDING_CMD;
1214 dwmmc_start_cmd(sc, cmd);
1215 return;
1216 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1217 sc->flags &= ~PENDING_STOP;
1218 /// XXX: What to do with this?
1219 //dwmmc_start_cmd(sc, req->stop);
1220 return;
1221 }
1222
1223 sc->curcmd = NULL;
1224 #ifdef MMCCAM
1225 ccb->ccb_h.status =
1226 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1227 xpt_done(ccb);
1228 sc->ccb = NULL;
1229 #else
1230 req->done(req);
1231 sc->req = NULL;
1232 #endif
1233 }
1234
1235 #ifndef MMCCAM
1236 static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1238 {
1239 struct dwmmc_softc *sc;
1240
1241 dprintf("%s\n", __func__);
1242
1243 sc = device_get_softc(brdev);
1244
1245 DWMMC_LOCK(sc);
1246 if (sc->req != NULL) {
1247 DWMMC_UNLOCK(sc);
1248 return (EBUSY);
1249 }
1250 sc->req = req;
1251 sc->flags |= PENDING_CMD;
1252 if (sc->req->stop)
1253 sc->flags |= PENDING_STOP;
1254
1255 dwmmc_next_operation(sc);
1256 DWMMC_UNLOCK(sc);
1257
1258 return (0);
1259 }
1260
1261 static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
1263 {
1264
1265 dprintf("%s\n", __func__);
1266
1267 return (0);
1268 }
1269
1270 static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
1272 {
1273 struct dwmmc_softc *sc;
1274
1275 sc = device_get_softc(brdev);
1276
1277 DWMMC_LOCK(sc);
1278 while (sc->bus_busy)
1279 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1280 sc->bus_busy++;
1281 DWMMC_UNLOCK(sc);
1282 return (0);
1283 }
1284
1285 static int
dwmmc_release_host(device_t brdev, device_t reqdev)
1287 {
1288 struct dwmmc_softc *sc;
1289
1290 sc = device_get_softc(brdev);
1291
1292 DWMMC_LOCK(sc);
1293 sc->bus_busy--;
1294 wakeup(sc);
1295 DWMMC_UNLOCK(sc);
1296 return (0);
1297 }
1298 #endif /* !MMCCAM */
1299
1300 static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1302 {
1303 struct dwmmc_softc *sc;
1304
1305 sc = device_get_softc(bus);
1306
1307 switch (which) {
1308 default:
1309 return (EINVAL);
1310 case MMCBR_IVAR_BUS_MODE:
1311 *(int *)result = sc->host.ios.bus_mode;
1312 break;
1313 case MMCBR_IVAR_BUS_WIDTH:
1314 *(int *)result = sc->host.ios.bus_width;
1315 break;
1316 case MMCBR_IVAR_CHIP_SELECT:
1317 *(int *)result = sc->host.ios.chip_select;
1318 break;
1319 case MMCBR_IVAR_CLOCK:
1320 *(int *)result = sc->host.ios.clock;
1321 break;
1322 case MMCBR_IVAR_F_MIN:
1323 *(int *)result = sc->host.f_min;
1324 break;
1325 case MMCBR_IVAR_F_MAX:
1326 *(int *)result = sc->host.f_max;
1327 break;
1328 case MMCBR_IVAR_HOST_OCR:
1329 *(int *)result = sc->host.host_ocr;
1330 break;
1331 case MMCBR_IVAR_MODE:
1332 *(int *)result = sc->host.mode;
1333 break;
1334 case MMCBR_IVAR_OCR:
1335 *(int *)result = sc->host.ocr;
1336 break;
1337 case MMCBR_IVAR_POWER_MODE:
1338 *(int *)result = sc->host.ios.power_mode;
1339 break;
1340 case MMCBR_IVAR_VDD:
1341 *(int *)result = sc->host.ios.vdd;
1342 break;
1343 case MMCBR_IVAR_VCCQ:
1344 *(int *)result = sc->host.ios.vccq;
1345 break;
1346 case MMCBR_IVAR_CAPS:
1347 *(int *)result = sc->host.caps;
1348 break;
1349 case MMCBR_IVAR_MAX_DATA:
1350 *(int *)result = DWMMC_MAX_DATA;
1351 break;
1352 case MMCBR_IVAR_TIMING:
1353 *(int *)result = sc->host.ios.timing;
1354 break;
1355 }
1356 return (0);
1357 }
1358
1359 static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1361 {
1362 struct dwmmc_softc *sc;
1363
1364 sc = device_get_softc(bus);
1365
1366 switch (which) {
1367 default:
1368 return (EINVAL);
1369 case MMCBR_IVAR_BUS_MODE:
1370 sc->host.ios.bus_mode = value;
1371 break;
1372 case MMCBR_IVAR_BUS_WIDTH:
1373 sc->host.ios.bus_width = value;
1374 break;
1375 case MMCBR_IVAR_CHIP_SELECT:
1376 sc->host.ios.chip_select = value;
1377 break;
1378 case MMCBR_IVAR_CLOCK:
1379 sc->host.ios.clock = value;
1380 break;
1381 case MMCBR_IVAR_MODE:
1382 sc->host.mode = value;
1383 break;
1384 case MMCBR_IVAR_OCR:
1385 sc->host.ocr = value;
1386 break;
1387 case MMCBR_IVAR_POWER_MODE:
1388 sc->host.ios.power_mode = value;
1389 break;
1390 case MMCBR_IVAR_VDD:
1391 sc->host.ios.vdd = value;
1392 break;
1393 case MMCBR_IVAR_TIMING:
1394 sc->host.ios.timing = value;
1395 break;
1396 case MMCBR_IVAR_VCCQ:
1397 sc->host.ios.vccq = value;
1398 break;
1399 /* These are read-only */
1400 case MMCBR_IVAR_CAPS:
1401 case MMCBR_IVAR_HOST_OCR:
1402 case MMCBR_IVAR_F_MIN:
1403 case MMCBR_IVAR_F_MAX:
1404 case MMCBR_IVAR_MAX_DATA:
1405 return (EINVAL);
1406 }
1407 return (0);
1408 }
1409
1410 #ifdef MMCCAM
1411 /* Note: this function likely belongs to the specific driver impl */
1412 static int
dwmmc_switch_vccq(device_t dev, device_t child)
1414 {
1415 device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
return (EINVAL);
1417 }
1418
1419 static int
dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1421 {
1422 struct dwmmc_softc *sc;
1423
1424 sc = device_get_softc(dev);
1425
1426 cts->host_ocr = sc->host.host_ocr;
1427 cts->host_f_min = sc->host.f_min;
1428 cts->host_f_max = sc->host.f_max;
1429 cts->host_caps = sc->host.caps;
1430 cts->host_max_data = DWMMC_MAX_DATA;
1431 memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1432
1433 return (0);
1434 }
1435
1436 static int
dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1438 {
1439 struct dwmmc_softc *sc;
1440 struct mmc_ios *ios;
1441 struct mmc_ios *new_ios;
1442 int res;
1443
1444 sc = device_get_softc(dev);
1445 ios = &sc->host.ios;
1446
1447 new_ios = &cts->ios;
1448
1449 /* Update only requested fields */
1450 if (cts->ios_valid & MMC_CLK) {
1451 ios->clock = new_ios->clock;
1452 if (bootverbose)
1453 device_printf(sc->dev, "Clock => %d\n", ios->clock);
1454 }
1455 if (cts->ios_valid & MMC_VDD) {
1456 ios->vdd = new_ios->vdd;
1457 if (bootverbose)
1458 device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1459 }
1460 if (cts->ios_valid & MMC_CS) {
1461 ios->chip_select = new_ios->chip_select;
1462 if (bootverbose)
1463 device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1464 }
1465 if (cts->ios_valid & MMC_BW) {
1466 ios->bus_width = new_ios->bus_width;
1467 if (bootverbose)
1468 device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1469 }
1470 if (cts->ios_valid & MMC_PM) {
1471 ios->power_mode = new_ios->power_mode;
1472 if (bootverbose)
1473 device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1474 }
1475 if (cts->ios_valid & MMC_BT) {
1476 ios->timing = new_ios->timing;
1477 if (bootverbose)
1478 device_printf(sc->dev, "Timing => %d\n", ios->timing);
1479 }
1480 if (cts->ios_valid & MMC_BM) {
1481 ios->bus_mode = new_ios->bus_mode;
1482 if (bootverbose)
1483 device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1484 }
1485 if (cts->ios_valid & MMC_VCCQ) {
1486 ios->vccq = new_ios->vccq;
1487 if (bootverbose)
1488 device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1489 res = dwmmc_switch_vccq(sc->dev, NULL);
1490 device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1491 }
1492
1493 return (dwmmc_update_ios(sc->dev, NULL));
1494 }
1495
1496 static int
dwmmc_cam_request(device_t dev, union ccb *ccb)
1498 {
1499 struct dwmmc_softc *sc;
1500 struct ccb_mmcio *mmcio;
1501
1502 sc = device_get_softc(dev);
1503 DWMMC_LOCK(sc);
1504
1505 KASSERT(ccb->ccb_h.pinfo.index == CAM_ACTIVE_INDEX,
1506 ("%s: ccb %p index %d != CAM_ACTIVE_INDEX: func=%#x %s status %#x\n",
1507 __func__, ccb, ccb->ccb_h.pinfo.index, ccb->ccb_h.func_code,
1508 xpt_action_name(ccb->ccb_h.func_code), ccb->ccb_h.status));
1509
1510 mmcio = &ccb->mmcio;
1511
1512 #ifdef DEBUG
1513 if (__predict_false(bootverbose)) {
1514 device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1515 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1516 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1517 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1518 }
1519 #endif
1520 if (mmcio->cmd.data != NULL) {
1521 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1522 panic("%s: data %p data->len = %d, data->flags = %d -- something is b0rked",
1523 __func__, mmcio->cmd.data, (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1524 }
1525
1526 if (sc->ccb != NULL) {
1527 device_printf(sc->dev, "%s: Controller still has an active command: "
1528 "sc->ccb %p new ccb %p\n", __func__, sc->ccb, ccb);
1529 DWMMC_UNLOCK(sc);
1530 return (EBUSY);
1531 }
1532 sc->ccb = ccb;
1533 sc->flags |= PENDING_CMD;
1534
1535 dwmmc_next_operation(sc);
1536 DWMMC_UNLOCK(sc);
1537
1538 return (0);
1539 }
1540
1541 static void
dwmmc_cam_poll(device_t dev)
1543 {
1544 struct dwmmc_softc *sc;
1545
1546 sc = device_get_softc(dev);
1547 dwmmc_intr(sc);
1548 }
1549 #endif /* MMCCAM */
1550
1551 static device_method_t dwmmc_methods[] = {
1552 /* Bus interface */
1553 DEVMETHOD(bus_read_ivar, dwmmc_read_ivar),
1554 DEVMETHOD(bus_write_ivar, dwmmc_write_ivar),
1555
1556 #ifndef MMCCAM
1557 /* mmcbr_if */
1558 DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios),
1559 DEVMETHOD(mmcbr_request, dwmmc_request),
1560 DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro),
1561 DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host),
1562 DEVMETHOD(mmcbr_release_host, dwmmc_release_host),
1563 #endif
1564
1565 #ifdef MMCCAM
1566 /* MMCCAM interface */
1567 DEVMETHOD(mmc_sim_get_tran_settings, dwmmc_get_tran_settings),
1568 DEVMETHOD(mmc_sim_set_tran_settings, dwmmc_set_tran_settings),
1569 DEVMETHOD(mmc_sim_cam_request, dwmmc_cam_request),
1570 DEVMETHOD(mmc_sim_cam_poll, dwmmc_cam_poll),
1571
1572 DEVMETHOD(bus_add_child, bus_generic_add_child),
1573 #endif
1574
1575 DEVMETHOD_END
1576 };
1577
1578 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1579 sizeof(struct dwmmc_softc));
1580