1 /*-
2 * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Synopsys DesignWare Mobile Storage Host Controller
33 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34 */
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/module.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/rman.h>
47 #include <sys/queue.h>
48 #include <sys/taskqueue.h>
49
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53
54 #include <dev/fdt/fdt_common.h>
55 #include <dev/ofw/openfirm.h>
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58
59 #include <machine/bus.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
62
63 #include <dev/clk/clk.h>
64
65 #include <dev/mmc/host/dwmmc_reg.h>
66 #include <dev/mmc/host/dwmmc_var.h>
67
68 #include "opt_mmccam.h"
69
70 #ifdef MMCCAM
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_debug.h>
74 #include <cam/cam_sim.h>
75 #include <cam/cam_xpt_sim.h>
76
77 #include "mmc_sim_if.h"
78 #endif
79
80 #include "mmcbr_if.h"
81
82 #ifdef DEBUG
83 #define dprintf(fmt, args...) printf(fmt, ##args)
84 #else
85 #define dprintf(x, arg...)
86 #endif
87
88 #define READ4(_sc, _reg) \
89 bus_read_4((_sc)->res[0], _reg)
90 #define WRITE4(_sc, _reg, _val) \
91 bus_write_4((_sc)->res[0], _reg, _val)
92
93 #define DIV_ROUND_UP(n, d) howmany(n, d)
94
95 #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
96 #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
97 #define DWMMC_LOCK_INIT(_sc) \
98 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
99 "dwmmc", MTX_DEF)
100 #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
101 #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
102 #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
103
104 #define PENDING_CMD 0x01
105 #define PENDING_STOP 0x02
106 #define CARD_INIT_DONE 0x04
107
108 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
109 |SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
110 #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
111 |SDMMC_INTMASK_RE)
112 #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
113 |SDMMC_INTMASK_HLE)
114
115 #define DES0_DIC (1 << 1) /* Disable Interrupt on Completion */
116 #define DES0_LD (1 << 2) /* Last Descriptor */
117 #define DES0_FS (1 << 3) /* First Descriptor */
118 #define DES0_CH (1 << 4) /* second address CHained */
119 #define DES0_ER (1 << 5) /* End of Ring */
120 #define DES0_CES (1 << 30) /* Card Error Summary */
121 #define DES0_OWN (1 << 31) /* OWN */
122
123 #define DES1_BS1_MASK 0x1fff
124
125 struct idmac_desc {
126 uint32_t des0; /* control */
127 uint32_t des1; /* bufsize */
128 uint32_t des2; /* buf1 phys addr */
129 uint32_t des3; /* buf2 phys addr or next descr */
130 };
131
132 #define IDMAC_DESC_SEGS (PAGE_SIZE / (sizeof(struct idmac_desc)))
133 #define IDMAC_DESC_SIZE (sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
134 #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */
135 /*
136 * Size field in DMA descriptor is 13 bits long (up to 4095 bytes),
 * but must be a multiple of the data bus size. Additionally, we must ensure
 * that bus_dmamap_load() doesn't further fragment the buffer (because it is
 * processed with page-size granularity). Thus limit the fragment size to
 * half of a page.
141 * XXX switch descriptor format to array and use second buffer pointer for
142 * second half of page
143 */
144 #define IDMAC_MAX_SIZE 2048
145 /*
146 * Busdma may bounce buffers, so we must reserve 2 descriptors
147 * (on start and on end) for bounced fragments.
148 */
149 #define DWMMC_MAX_DATA (IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE
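/*
 * Example (assuming 4 KB pages): 4096 / 16 = 256 descriptors per ring, so
 * DWMMC_MAX_DATA = 2048 * (256 - 2) / 512 = 1016 sectors per request.
 */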
150
151 static void dwmmc_next_operation(struct dwmmc_softc *);
152 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
153 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
154 static int dma_stop(struct dwmmc_softc *);
155 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
156 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
157 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
158
159 static struct resource_spec dwmmc_spec[] = {
160 { SYS_RES_MEMORY, 0, RF_ACTIVE },
161 { SYS_RES_IRQ, 0, RF_ACTIVE },
162 { -1, 0 }
163 };
164
165 #define HWTYPE_MASK (0x0000ffff)
166 #define HWFLAG_MASK (0xffff << 16)
167
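/*
 * busdma callback for the descriptor ring: record the bus address of the
 * single segment so it can later be programmed into SDMMC_DBADDR.
 */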
168 static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
170 {
171
172 if (nsegs != 1)
173 panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
174 if (error != 0)
175 panic("%s: error != 0 (%d)\n", __func__, error);
176
177 *(bus_addr_t *)arg = segs[0].ds_addr;
178 }
179
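/*
 * busdma callback for a data transfer: populate the IDMAC descriptor chain
 * from the loaded segments, mark the first and last descriptors and hand
 * ownership of each descriptor to the controller.
 */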
180 static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
182 {
183 struct dwmmc_softc *sc;
184 int idx;
185
186 sc = arg;
187 dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
188 if (error != 0)
189 panic("%s: error != 0 (%d)\n", __func__, error);
190
191 for (idx = 0; idx < nsegs; idx++) {
192 sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
193 sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
194 sc->desc_ring[idx].des2 = segs[idx].ds_addr;
195
196 if (idx == 0)
197 sc->desc_ring[idx].des0 |= DES0_FS;
198
199 if (idx == (nsegs - 1)) {
200 sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
201 sc->desc_ring[idx].des0 |= DES0_LD;
202 }
203 wmb();
204 sc->desc_ring[idx].des0 |= DES0_OWN;
205 }
206 }
207
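/*
 * Set the requested reset bits in SDMMC_CTRL and poll (up to ~1 ms) until
 * the controller clears them. Returns 0 on success, 1 on timeout.
 */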
208 static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
210 {
211 int reg;
212 int i;
213
214 reg = READ4(sc, SDMMC_CTRL);
215 reg |= (reset_bits);
216 WRITE4(sc, SDMMC_CTRL, reg);
217
	/* Wait until reset completes */
219 for (i = 0; i < 100; i++) {
220 if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
221 return (0);
222 DELAY(10);
223 }
224
225 device_printf(sc->dev, "Reset failed\n");
226
227 return (1);
228 }
229
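/*
 * Allocate the IDMAC descriptor ring (linked into a circular chain) and
 * create the DMA tag and map used for data buffers.
 */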
230 static int
dma_setup(struct dwmmc_softc *sc)
232 {
233 int error;
234 int nidx;
235 int idx;
236
237 /*
238 * Set up TX descriptor ring, descriptors, and dma maps.
239 */
240 error = bus_dma_tag_create(
241 bus_get_dma_tag(sc->dev), /* Parent tag. */
242 4096, 0, /* alignment, boundary */
243 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
244 BUS_SPACE_MAXADDR, /* highaddr */
245 NULL, NULL, /* filter, filterarg */
246 IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */
247 IDMAC_DESC_SIZE, /* maxsegsize */
248 0, /* flags */
249 NULL, NULL, /* lockfunc, lockarg */
250 &sc->desc_tag);
251 if (error != 0) {
252 device_printf(sc->dev,
253 "could not create ring DMA tag.\n");
254 return (1);
255 }
256
257 error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
258 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
259 &sc->desc_map);
260 if (error != 0) {
261 device_printf(sc->dev,
262 "could not allocate descriptor ring.\n");
263 return (1);
264 }
265
266 error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
267 sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
268 &sc->desc_ring_paddr, 0);
269 if (error != 0) {
270 device_printf(sc->dev,
271 "could not load descriptor ring map.\n");
272 return (1);
273 }
274
275 for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
276 sc->desc_ring[idx].des0 = DES0_CH;
277 sc->desc_ring[idx].des1 = 0;
278 nidx = (idx + 1) % IDMAC_DESC_SEGS;
279 sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
280 (nidx * sizeof(struct idmac_desc));
281 }
282 sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
283 sc->desc_ring[idx - 1].des0 |= DES0_ER;
284
285 error = bus_dma_tag_create(
286 bus_get_dma_tag(sc->dev), /* Parent tag. */
287 8, 0, /* alignment, boundary */
288 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
289 BUS_SPACE_MAXADDR, /* highaddr */
290 NULL, NULL, /* filter, filterarg */
291 IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */
292 IDMAC_DESC_SEGS, /* nsegments */
293 IDMAC_MAX_SIZE, /* maxsegsize */
294 0, /* flags */
295 NULL, NULL, /* lockfunc, lockarg */
296 &sc->buf_tag);
297 if (error != 0) {
298 device_printf(sc->dev,
299 "could not create ring DMA tag.\n");
300 return (1);
301 }
302
303 error = bus_dmamap_create(sc->buf_tag, 0,
304 &sc->buf_map);
305 if (error != 0) {
306 device_printf(sc->dev,
307 "could not create TX buffer DMA map.\n");
308 return (1);
309 }
310
311 return (0);
312 }
313
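/*
 * Copy the response registers (SDMMC_RESP0..3) into the current command
 * once the controller signals command completion.
 */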
314 static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
316 {
317 struct mmc_command *cmd;
318 #ifdef MMCCAM
319 union ccb *ccb;
320 #endif
321
322 #ifdef MMCCAM
323 ccb = sc->ccb;
324 if (ccb == NULL)
325 return;
326 cmd = &ccb->mmcio.cmd;
327 #else
328 cmd = sc->curcmd;
329 #endif
330 if (cmd == NULL)
331 return;
332
333 if (cmd->flags & MMC_RSP_PRESENT) {
334 if (cmd->flags & MMC_RSP_136) {
335 cmd->resp[3] = READ4(sc, SDMMC_RESP0);
336 cmd->resp[2] = READ4(sc, SDMMC_RESP1);
337 cmd->resp[1] = READ4(sc, SDMMC_RESP2);
338 cmd->resp[0] = READ4(sc, SDMMC_RESP3);
339 } else {
340 cmd->resp[3] = 0;
341 cmd->resp[2] = 0;
342 cmd->resp[1] = 0;
343 cmd->resp[0] = READ4(sc, SDMMC_RESP0);
344 }
345 }
346 }
347
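/*
 * Called from the interrupt handler: once the command has completed (and,
 * for data commands, the transfer and optional auto-stop have finished),
 * move on to the next operation.
 */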
348 static void
dwmmc_tasklet(struct dwmmc_softc *sc)
350 {
351 struct mmc_command *cmd;
352
353 cmd = sc->curcmd;
354 if (cmd == NULL)
355 return;
356
357 if (!sc->cmd_done)
358 return;
359
360 if (cmd->error != MMC_ERR_NONE || !cmd->data) {
361 dwmmc_next_operation(sc);
362 } else if (cmd->data && sc->dto_rcvd) {
363 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
364 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
365 sc->use_auto_stop) {
366 if (sc->acd_rcvd)
367 dwmmc_next_operation(sc);
368 } else {
369 dwmmc_next_operation(sc);
370 }
371 }
372 }
373
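/*
 * Interrupt handler: process controller (MINTSTS) and internal DMA (IDSTS)
 * status, record command/data completion and errors, and service PIO FIFO
 * requests when DMA is not in use.
 */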
374 static void
dwmmc_intr(void *arg)
376 {
377 struct mmc_command *cmd;
378 struct dwmmc_softc *sc;
379 uint32_t reg;
380
381 sc = arg;
382
383 DWMMC_LOCK(sc);
384
385 cmd = sc->curcmd;
386
387 /* First handle SDMMC controller interrupts */
388 reg = READ4(sc, SDMMC_MINTSTS);
389 if (reg) {
390 dprintf("%s 0x%08x\n", __func__, reg);
391
392 if (reg & DWMMC_CMD_ERR_FLAGS) {
393 dprintf("cmd err 0x%08x cmd 0x%08x\n",
394 reg, cmd->opcode);
395 cmd->error = MMC_ERR_TIMEOUT;
396 }
397
398 if (reg & DWMMC_DATA_ERR_FLAGS) {
399 dprintf("data err 0x%08x cmd 0x%08x\n",
400 reg, cmd->opcode);
401 cmd->error = MMC_ERR_FAILED;
402 if (!sc->use_pio) {
403 dma_done(sc, cmd);
404 dma_stop(sc);
405 }
406 }
407
408 if (reg & SDMMC_INTMASK_CMD_DONE) {
409 dwmmc_cmd_done(sc);
410 sc->cmd_done = 1;
411 }
412
413 if (reg & SDMMC_INTMASK_ACD)
414 sc->acd_rcvd = 1;
415
416 if (reg & SDMMC_INTMASK_DTO)
417 sc->dto_rcvd = 1;
418
419 if (reg & SDMMC_INTMASK_CD) {
420 dwmmc_handle_card_present(sc,
421 READ4(sc, SDMMC_CDETECT) == 0 ? true : false);
422 }
423 }
424
425 /* Ack interrupts */
426 WRITE4(sc, SDMMC_RINTSTS, reg);
427
428 if (sc->use_pio) {
429 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
430 pio_read(sc, cmd);
431 }
432 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
433 pio_write(sc, cmd);
434 }
435 } else {
436 /* Now handle DMA interrupts */
437 reg = READ4(sc, SDMMC_IDSTS);
438 if (reg) {
439 dprintf("dma intr 0x%08x\n", reg);
440 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
441 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
442 SDMMC_IDINTEN_RI));
443 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
444 dma_done(sc, cmd);
445 }
446 }
447 }
448
449 dwmmc_tasklet(sc);
450
451 DWMMC_UNLOCK(sc);
452 }
453
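/*
 * Card-detect state change: debounce insertion by scheduling the delayed
 * task (about half a second); handle removal immediately.
 */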
454 static void
dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
456 {
457 bool was_present;
458
459 if (dumping || SCHEDULER_STOPPED())
460 return;
461
462 was_present = sc->child != NULL;
463
464 if (!was_present && is_present) {
465 taskqueue_enqueue_timeout(taskqueue_bus,
466 &sc->card_delayed_task, -(hz / 2));
467 } else if (was_present && !is_present) {
468 taskqueue_enqueue(taskqueue_bus, &sc->card_task);
469 }
470 }
471
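/*
 * Card presence task: with MMCCAM, rescan the SIM; otherwise attach or
 * detach the mmc child device according to the card-detect state.
 */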
472 static void
dwmmc_card_task(void *arg, int pending __unused)
474 {
475 struct dwmmc_softc *sc = arg;
476
477 #ifdef MMCCAM
478 mmc_cam_sim_discover(&sc->mmc_sim);
479 #else
480 bus_topo_lock();
481 if (READ4(sc, SDMMC_CDETECT) == 0 ||
482 (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
483 if (sc->child == NULL) {
484 if (bootverbose)
485 device_printf(sc->dev, "Card inserted\n");
486
487 sc->child = device_add_child(sc->dev, "mmc", DEVICE_UNIT_ANY);
488 if (sc->child) {
489 device_set_ivars(sc->child, sc);
490 (void)device_probe_and_attach(sc->child);
491 }
492 }
493 } else {
494 /* Card isn't present, detach if necessary */
495 if (sc->child != NULL) {
496 if (bootverbose)
497 device_printf(sc->dev, "Card removed\n");
498
499 device_delete_child(sc->dev, sc->child);
500 sc->child = NULL;
501 }
502 }
503 bus_topo_unlock();
504 #endif /* MMCCAM */
505 }
506
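/*
 * Parse FDT properties (fifo-depth, clock-frequency, ...), pick up the
 * optional reset, regulators and clocks, and determine the controller
 * input clock rate (bus_hz).
 */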
507 static int
parse_fdt(struct dwmmc_softc *sc)
509 {
510 pcell_t dts_value[3];
511 phandle_t node;
512 uint32_t bus_hz = 0;
513 int len;
514 int error;
515
516 if ((node = ofw_bus_get_node(sc->dev)) == -1)
517 return (ENXIO);
518
519 /* Set some defaults for freq and supported mode */
520 sc->host.f_min = 400000;
521 sc->host.f_max = 200000000;
522 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
523 sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
524 mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
525
526 /* fifo-depth */
527 if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
528 OF_getencprop(node, "fifo-depth", dts_value, len);
529 sc->fifo_depth = dts_value[0];
530 }
531
532 /* num-slots (Deprecated) */
533 sc->num_slots = 1;
534 if ((len = OF_getproplen(node, "num-slots")) > 0) {
535 device_printf(sc->dev, "num-slots property is deprecated\n");
536 OF_getencprop(node, "num-slots", dts_value, len);
537 sc->num_slots = dts_value[0];
538 }
539
540 /* clock-frequency */
541 if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
542 OF_getencprop(node, "clock-frequency", dts_value, len);
543 bus_hz = dts_value[0];
544 }
545
546 /* IP block reset is optional */
547 error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
548 if (error != 0 &&
549 error != ENOENT &&
550 error != ENODEV) {
551 device_printf(sc->dev, "Cannot get reset\n");
552 goto fail;
553 }
554
555 /* vmmc regulator is optional */
556 error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
557 &sc->vmmc);
558 if (error != 0 &&
559 error != ENOENT &&
560 error != ENODEV) {
561 device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
562 goto fail;
563 }
564
565 /* vqmmc regulator is optional */
566 error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
567 &sc->vqmmc);
568 if (error != 0 &&
569 error != ENOENT &&
570 error != ENODEV) {
571 device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
572 goto fail;
573 }
574
575 /* Assert reset first */
576 if (sc->hwreset != NULL) {
577 error = hwreset_assert(sc->hwreset);
578 if (error != 0) {
579 device_printf(sc->dev, "Cannot assert reset\n");
580 goto fail;
581 }
582 }
583
584 /* BIU (Bus Interface Unit clock) is optional */
585 error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
586 if (error != 0 &&
587 error != ENOENT &&
588 error != ENODEV) {
589 device_printf(sc->dev, "Cannot get 'biu' clock\n");
590 goto fail;
591 }
592
593 if (sc->biu) {
594 error = clk_enable(sc->biu);
595 if (error != 0) {
596 device_printf(sc->dev, "cannot enable biu clock\n");
597 goto fail;
598 }
599 }
600
601 /*
602 * CIU (Controller Interface Unit clock) is mandatory
603 * if no clock-frequency property is given
604 */
605 error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
606 if (error != 0 &&
607 error != ENOENT &&
608 error != ENODEV) {
609 device_printf(sc->dev, "Cannot get 'ciu' clock\n");
610 goto fail;
611 }
612
613 if (sc->ciu) {
614 if (bus_hz != 0) {
615 error = clk_set_freq(sc->ciu, bus_hz, 0);
616 if (error != 0)
617 device_printf(sc->dev,
618 "cannot set ciu clock to %u\n", bus_hz);
619 }
620 error = clk_enable(sc->ciu);
621 if (error != 0) {
622 device_printf(sc->dev, "cannot enable ciu clock\n");
623 goto fail;
624 }
625 clk_get_freq(sc->ciu, &sc->bus_hz);
626 }
627
628 /* Enable regulators */
629 if (sc->vmmc != NULL) {
630 error = regulator_enable(sc->vmmc);
631 if (error != 0) {
632 device_printf(sc->dev, "Cannot enable vmmc regulator\n");
633 goto fail;
634 }
635 }
636 if (sc->vqmmc != NULL) {
637 error = regulator_enable(sc->vqmmc);
638 if (error != 0) {
639 device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
640 goto fail;
641 }
642 }
643
644 /* Take dwmmc out of reset */
645 if (sc->hwreset != NULL) {
646 error = hwreset_deassert(sc->hwreset);
647 if (error != 0) {
648 device_printf(sc->dev, "Cannot deassert reset\n");
649 goto fail;
650 }
651 }
652
653 if (sc->bus_hz == 0) {
654 device_printf(sc->dev, "No bus speed provided\n");
655 goto fail;
656 }
657
658 return (0);
659
660 fail:
661 return (ENXIO);
662 }
663
664 int
dwmmc_attach(device_t dev)
666 {
667 struct dwmmc_softc *sc;
668 int error;
669
670 sc = device_get_softc(dev);
671
672 sc->dev = dev;
673
	/* Why not use Auto Stop? It saves hundreds of interrupts per second. */
675 sc->use_auto_stop = 1;
676
677 error = parse_fdt(sc);
678 if (error != 0) {
679 device_printf(dev, "Can't get FDT property.\n");
680 return (ENXIO);
681 }
682
683 DWMMC_LOCK_INIT(sc);
684
685 if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
686 device_printf(dev, "could not allocate resources\n");
687 return (ENXIO);
688 }
689
690 /* Setup interrupt handler. */
691 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
692 NULL, dwmmc_intr, sc, &sc->intr_cookie);
693 if (error != 0) {
694 device_printf(dev, "could not setup interrupt handler.\n");
695 return (ENXIO);
696 }
697
698 device_printf(dev, "Hardware version ID is %04x\n",
699 READ4(sc, SDMMC_VERID) & 0xffff);
700
701 /* Reset all */
702 if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
703 SDMMC_CTRL_FIFO_RESET |
704 SDMMC_CTRL_DMA_RESET)))
705 return (ENXIO);
706
707 dwmmc_setup_bus(sc, sc->host.f_min);
708
709 if (sc->fifo_depth == 0) {
710 sc->fifo_depth = 1 +
711 ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
712 device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
713 sc->fifo_depth);
714 }
715
716 if (!sc->use_pio) {
717 dma_stop(sc);
718 if (dma_setup(sc))
719 return (ENXIO);
720
721 /* Install desc base */
722 WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
723
724 /* Enable DMA interrupts */
725 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
726 WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
727 SDMMC_IDINTEN_RI |
728 SDMMC_IDINTEN_TI));
729 }
730
	/* Clear and disable interrupts for a while */
732 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
733 WRITE4(sc, SDMMC_INTMASK, 0);
734
735 /* Maximum timeout */
736 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
737
738 /* Enable interrupts */
739 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
740 WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
741 SDMMC_INTMASK_DTO |
742 SDMMC_INTMASK_ACD |
743 SDMMC_INTMASK_TXDR |
744 SDMMC_INTMASK_RXDR |
745 DWMMC_ERR_FLAGS |
746 SDMMC_INTMASK_CD));
747 WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
748
749 TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
750 TIMEOUT_TASK_INIT(taskqueue_bus, &sc->card_delayed_task, 0,
751 dwmmc_card_task, sc);
752
753 #ifdef MMCCAM
754 sc->ccb = NULL;
755 if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
756 device_printf(dev, "cannot alloc cam sim\n");
757 dwmmc_detach(dev);
758 return (ENXIO);
759 }
760 #endif
761 /*
762 * Schedule a card detection as we won't get an interrupt
763 * if the card is inserted when we attach
764 */
765 dwmmc_card_task(sc, 0);
766 return (0);
767 }
768
769 int
dwmmc_detach(device_t dev)
771 {
772 struct dwmmc_softc *sc;
773 int ret;
774
775 sc = device_get_softc(dev);
776
777 ret = bus_generic_detach(dev);
778 if (ret != 0)
779 return (ret);
780
781 taskqueue_drain(taskqueue_bus, &sc->card_task);
782 taskqueue_drain_timeout(taskqueue_bus, &sc->card_delayed_task);
783
784 if (sc->intr_cookie != NULL) {
785 ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
786 if (ret != 0)
787 return (ret);
788 }
789 bus_release_resources(dev, dwmmc_spec, sc->res);
790
791 DWMMC_LOCK_DESTROY(sc);
792
793 if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
794 device_printf(sc->dev, "cannot deassert reset\n");
795 if (sc->biu != NULL && clk_disable(sc->biu) != 0)
796 device_printf(sc->dev, "cannot disable biu clock\n");
797 if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
798 device_printf(sc->dev, "cannot disable ciu clock\n");
799
800 if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
801 device_printf(sc->dev, "Cannot disable vmmc regulator\n");
802 if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
803 device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
804
805 #ifdef MMCCAM
806 mmc_cam_sim_free(&sc->mmc_sim);
807 #endif
808
809 return (0);
810 }
811
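/*
 * Program the card clock: freq == 0 gates the clock off; otherwise CLKDIV
 * is set to howmany(bus_hz, 2 * freq) (0 when bus_hz == freq) and the clock
 * is re-enabled, each step synchronized with an update-clock-only command.
 */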
812 static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
814 {
815 int tout;
816 int div;
817
818 if (freq == 0) {
819 WRITE4(sc, SDMMC_CLKENA, 0);
820 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
821 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
822
823 tout = 1000;
824 do {
825 if (tout-- < 0) {
826 device_printf(sc->dev, "Failed update clk\n");
827 return (1);
828 }
829 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
830
831 return (0);
832 }
833
834 WRITE4(sc, SDMMC_CLKENA, 0);
835 WRITE4(sc, SDMMC_CLKSRC, 0);
836
837 div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
838
839 WRITE4(sc, SDMMC_CLKDIV, div);
840 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
841 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
842
843 tout = 1000;
844 do {
845 if (tout-- < 0) {
846 device_printf(sc->dev, "Failed to update clk\n");
847 return (1);
848 }
849 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
850
851 WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
852 WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
853 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
854
855 tout = 1000;
856 do {
857 if (tout-- < 0) {
858 device_printf(sc->dev, "Failed to enable clk\n");
859 return (1);
860 }
861 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
862
863 return (0);
864 }
865
866 static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
868 {
869 struct dwmmc_softc *sc;
870 struct mmc_ios *ios;
871 uint32_t reg;
872 int ret = 0;
873
874 sc = device_get_softc(brdev);
875 ios = &sc->host.ios;
876
877 dprintf("Setting up clk %u bus_width %d, timing: %d\n",
878 ios->clock, ios->bus_width, ios->timing);
879
880 switch (ios->power_mode) {
881 case power_on:
882 break;
883 case power_off:
884 WRITE4(sc, SDMMC_PWREN, 0);
885 break;
886 case power_up:
887 WRITE4(sc, SDMMC_PWREN, 1);
888 break;
889 }
890
891 mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
892
893 if (ios->bus_width == bus_width_8)
894 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
895 else if (ios->bus_width == bus_width_4)
896 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
897 else
898 WRITE4(sc, SDMMC_CTYPE, 0);
899
900 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
901 /* XXX: take care about DDR or SDR use here */
902 WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
903 }
904
905 /* Set DDR mode */
906 reg = READ4(sc, SDMMC_UHS_REG);
907 if (ios->timing == bus_timing_uhs_ddr50 ||
908 ios->timing == bus_timing_mmc_ddr52 ||
909 ios->timing == bus_timing_mmc_hs400)
910 reg |= (SDMMC_UHS_REG_DDR);
911 else
912 reg &= ~(SDMMC_UHS_REG_DDR);
913 WRITE4(sc, SDMMC_UHS_REG, reg);
914
915 if (sc->update_ios)
916 ret = sc->update_ios(sc, ios);
917
918 dwmmc_setup_bus(sc, ios->clock);
919
920 return (ret);
921 }
922
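/*
 * Complete a DMA transfer: sync the buffer and descriptor maps and unload
 * the buffer map.
 */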
923 static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
925 {
926 struct mmc_data *data;
927
928 data = cmd->data;
929
930 if (data->flags & MMC_DATA_WRITE)
931 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
932 BUS_DMASYNC_POSTWRITE);
933 else
934 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
935 BUS_DMASYNC_POSTREAD);
936
937 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
938 BUS_DMASYNC_POSTWRITE);
939
940 bus_dmamap_unload(sc->buf_tag, sc->buf_map);
941
942 return (0);
943 }
944
945 static int
dma_stop(struct dwmmc_softc *sc)
947 {
948 int reg;
949
950 reg = READ4(sc, SDMMC_CTRL);
951 reg &= ~(SDMMC_CTRL_USE_IDMAC);
952 reg |= (SDMMC_CTRL_DMA_RESET);
953 WRITE4(sc, SDMMC_CTRL, reg);
954
955 reg = READ4(sc, SDMMC_BMOD);
956 reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
957 reg |= (SDMMC_BMOD_SWR);
958 WRITE4(sc, SDMMC_BMOD, reg);
959
960 return (0);
961 }
962
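/*
 * Set up a DMA transfer: mask the RXDR/TXDR (PIO) interrupts, load the data
 * buffer into the IDMAC descriptor chain, sync the maps, program the FIFO
 * watermarks, enable the internal DMA engine and write the poll demand
 * register.
 */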
963 static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
965 {
966 struct mmc_data *data;
967 int err;
968 int reg;
969
970 data = cmd->data;
971
972 reg = READ4(sc, SDMMC_INTMASK);
973 reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
974 WRITE4(sc, SDMMC_INTMASK, reg);
975 dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
976 err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
977 data->data, data->len, dwmmc_ring_setup,
978 sc, BUS_DMA_NOWAIT);
979 if (err != 0)
980 panic("dmamap_load failed\n");
981
982 /* Ensure the device can see the desc */
983 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
984 BUS_DMASYNC_PREWRITE);
985
986 if (data->flags & MMC_DATA_WRITE)
987 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
988 BUS_DMASYNC_PREWRITE);
989 else
990 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
991 BUS_DMASYNC_PREREAD);
992
993 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
994 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
995 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
996
997 WRITE4(sc, SDMMC_FIFOTH, reg);
998 wmb();
999
1000 reg = READ4(sc, SDMMC_CTRL);
1001 reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1002 WRITE4(sc, SDMMC_CTRL, reg);
1003 wmb();
1004
1005 reg = READ4(sc, SDMMC_BMOD);
1006 reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1007 WRITE4(sc, SDMMC_BMOD, reg);
1008
1009 /* Start */
1010 WRITE4(sc, SDMMC_PLDMND, 1);
1011
1012 return (0);
1013 }
1014
1015 static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1017 {
1018 struct mmc_data *data;
1019 int reg;
1020
1021 data = cmd->data;
1022 data->xfer_len = 0;
1023
1024 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1025 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1026 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1027
1028 WRITE4(sc, SDMMC_FIFOTH, reg);
1029 wmb();
1030
1031 return (0);
1032 }
1033
1034 static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1036 {
1037 struct mmc_data *data;
1038 uint32_t *p, status;
1039
1040 if (cmd == NULL || cmd->data == NULL)
1041 return;
1042
1043 data = cmd->data;
1044 if ((data->flags & MMC_DATA_READ) == 0)
1045 return;
1046
1047 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1048 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1049
1050 while (data->xfer_len < data->len) {
1051 status = READ4(sc, SDMMC_STATUS);
1052 if (status & SDMMC_STATUS_FIFO_EMPTY)
1053 break;
1054 *p++ = READ4(sc, SDMMC_DATA);
1055 data->xfer_len += 4;
1056 }
1057
1058 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1059 }
1060
1061 static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1063 {
1064 struct mmc_data *data;
1065 uint32_t *p, status;
1066
1067 if (cmd == NULL || cmd->data == NULL)
1068 return;
1069
1070 data = cmd->data;
1071 if ((data->flags & MMC_DATA_WRITE) == 0)
1072 return;
1073
1074 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1075 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1076
1077 while (data->xfer_len < data->len) {
1078 status = READ4(sc, SDMMC_STATUS);
1079 if (status & SDMMC_STATUS_FIFO_FULL)
1080 break;
1081 WRITE4(sc, SDMMC_DATA, *p++);
1082 data->xfer_len += 4;
1083 }
1084
1085 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1086 }
1087
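/*
 * Build the SDMMC_CMD word from the mmc_command flags, program the block
 * size and byte count for data transfers, prepare PIO or DMA as configured,
 * and start the command.
 */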
1088 static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1090 {
1091 struct mmc_data *data;
1092 uint32_t blksz;
1093 uint32_t cmdr;
1094
1095 dprintf("%s\n", __func__);
1096 sc->curcmd = cmd;
1097 data = cmd->data;
1098
1099 #ifndef MMCCAM
1100 /* XXX Upper layers don't always set this */
1101 cmd->mrq = sc->req;
1102 #endif
1103 /* Begin setting up command register. */
1104
1105 cmdr = cmd->opcode;
1106
1107 dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1108
1109 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1110 cmd->opcode == MMC_GO_IDLE_STATE ||
1111 cmd->opcode == MMC_GO_INACTIVE_STATE)
1112 cmdr |= SDMMC_CMD_STOP_ABORT;
1113 else if (cmd->opcode != MMC_SEND_STATUS && data)
1114 cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1115
1116 /* Set up response handling. */
1117 if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1118 cmdr |= SDMMC_CMD_RESP_EXP;
1119 if (cmd->flags & MMC_RSP_136)
1120 cmdr |= SDMMC_CMD_RESP_LONG;
1121 }
1122
1123 if (cmd->flags & MMC_RSP_CRC)
1124 cmdr |= SDMMC_CMD_RESP_CRC;
1125
1126 /*
1127 * XXX: Not all platforms want this.
1128 */
1129 cmdr |= SDMMC_CMD_USE_HOLD_REG;
1130
1131 if ((sc->flags & CARD_INIT_DONE) == 0) {
1132 sc->flags |= (CARD_INIT_DONE);
1133 cmdr |= SDMMC_CMD_SEND_INIT;
1134 }
1135
1136 if (data) {
1137 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1138 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1139 sc->use_auto_stop)
1140 cmdr |= SDMMC_CMD_SEND_ASTOP;
1141
1142 cmdr |= SDMMC_CMD_DATA_EXP;
1143 if (data->flags & MMC_DATA_STREAM)
1144 cmdr |= SDMMC_CMD_MODE_STREAM;
1145 if (data->flags & MMC_DATA_WRITE)
1146 cmdr |= SDMMC_CMD_DATA_WRITE;
1147
1148 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1149 #ifdef MMCCAM
1150 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1151 WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1152 WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1153 } else
1154 #endif
1155 {
1156 WRITE4(sc, SDMMC_BYTCNT, data->len);
1157 blksz = (data->len < MMC_SECTOR_SIZE) ? \
1158 data->len : MMC_SECTOR_SIZE;
1159 WRITE4(sc, SDMMC_BLKSIZ, blksz);
1160 }
1161
1162 if (sc->use_pio) {
1163 pio_prepare(sc, cmd);
1164 } else {
1165 dma_prepare(sc, cmd);
1166 }
1167 wmb();
1168 }
1169
1170 dprintf("cmdr 0x%08x\n", cmdr);
1171
1172 WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1173 wmb();
1174 WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}
1176
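/*
 * Issue the pending command, or finish the request (completing the CCB in
 * the MMCCAM case) once nothing is left to do. Waits for the card to leave
 * the busy state before starting a new command.
 */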
1177 static void
dwmmc_next_operation(struct dwmmc_softc *sc)
1179 {
1180 struct mmc_command *cmd;
1181 dprintf("%s\n", __func__);
1182 #ifdef MMCCAM
1183 union ccb *ccb;
1184
1185 ccb = sc->ccb;
1186 if (ccb == NULL)
1187 return;
1188 cmd = &ccb->mmcio.cmd;
1189 #else
1190 struct mmc_request *req;
1191
1192 req = sc->req;
1193 if (req == NULL)
1194 return;
1195 cmd = req->cmd;
1196 #endif
1197
1198 sc->acd_rcvd = 0;
1199 sc->dto_rcvd = 0;
1200 sc->cmd_done = 0;
1201
1202 /*
	 * XXX: Wait while the card is still busy.
	 * We need this to prevent data timeouts,
	 * mostly caused by a multi-block write command
	 * followed by a single-block read.
1207 */
1208 while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1209 continue;
1210
1211 if (sc->flags & PENDING_CMD) {
1212 sc->flags &= ~PENDING_CMD;
1213 dwmmc_start_cmd(sc, cmd);
1214 return;
1215 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1216 sc->flags &= ~PENDING_STOP;
1217 /// XXX: What to do with this?
1218 //dwmmc_start_cmd(sc, req->stop);
1219 return;
1220 }
1221
1222 #ifdef MMCCAM
1223 sc->ccb = NULL;
1224 sc->curcmd = NULL;
1225 ccb->ccb_h.status =
1226 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1227 xpt_done(ccb);
1228 #else
1229 sc->req = NULL;
1230 sc->curcmd = NULL;
1231 req->done(req);
1232 #endif
1233 }
1234
1235 static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1237 {
1238 struct dwmmc_softc *sc;
1239
1240 sc = device_get_softc(brdev);
1241
1242 dprintf("%s\n", __func__);
1243
1244 DWMMC_LOCK(sc);
1245
1246 #ifdef MMCCAM
1247 sc->flags |= PENDING_CMD;
1248 #else
1249 if (sc->req != NULL) {
1250 DWMMC_UNLOCK(sc);
1251 return (EBUSY);
1252 }
1253
1254 sc->req = req;
1255 sc->flags |= PENDING_CMD;
1256 if (sc->req->stop)
1257 sc->flags |= PENDING_STOP;
1258 #endif
1259 dwmmc_next_operation(sc);
1260
1261 DWMMC_UNLOCK(sc);
1262 return (0);
1263 }
1264
1265 #ifndef MMCCAM
1266 static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
1268 {
1269
1270 dprintf("%s\n", __func__);
1271
1272 return (0);
1273 }
1274
1275 static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
1277 {
1278 struct dwmmc_softc *sc;
1279
1280 sc = device_get_softc(brdev);
1281
1282 DWMMC_LOCK(sc);
1283 while (sc->bus_busy)
1284 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1285 sc->bus_busy++;
1286 DWMMC_UNLOCK(sc);
1287 return (0);
1288 }
1289
1290 static int
dwmmc_release_host(device_t brdev, device_t reqdev)
1292 {
1293 struct dwmmc_softc *sc;
1294
1295 sc = device_get_softc(brdev);
1296
1297 DWMMC_LOCK(sc);
1298 sc->bus_busy--;
1299 wakeup(sc);
1300 DWMMC_UNLOCK(sc);
1301 return (0);
1302 }
1303 #endif /* !MMCCAM */
1304
1305 static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1307 {
1308 struct dwmmc_softc *sc;
1309
1310 sc = device_get_softc(bus);
1311
1312 switch (which) {
1313 default:
1314 return (EINVAL);
1315 case MMCBR_IVAR_BUS_MODE:
1316 *(int *)result = sc->host.ios.bus_mode;
1317 break;
1318 case MMCBR_IVAR_BUS_WIDTH:
1319 *(int *)result = sc->host.ios.bus_width;
1320 break;
1321 case MMCBR_IVAR_CHIP_SELECT:
1322 *(int *)result = sc->host.ios.chip_select;
1323 break;
1324 case MMCBR_IVAR_CLOCK:
1325 *(int *)result = sc->host.ios.clock;
1326 break;
1327 case MMCBR_IVAR_F_MIN:
1328 *(int *)result = sc->host.f_min;
1329 break;
1330 case MMCBR_IVAR_F_MAX:
1331 *(int *)result = sc->host.f_max;
1332 break;
1333 case MMCBR_IVAR_HOST_OCR:
1334 *(int *)result = sc->host.host_ocr;
1335 break;
1336 case MMCBR_IVAR_MODE:
1337 *(int *)result = sc->host.mode;
1338 break;
1339 case MMCBR_IVAR_OCR:
1340 *(int *)result = sc->host.ocr;
1341 break;
1342 case MMCBR_IVAR_POWER_MODE:
1343 *(int *)result = sc->host.ios.power_mode;
1344 break;
1345 case MMCBR_IVAR_VDD:
1346 *(int *)result = sc->host.ios.vdd;
1347 break;
1348 case MMCBR_IVAR_VCCQ:
1349 *(int *)result = sc->host.ios.vccq;
1350 break;
1351 case MMCBR_IVAR_CAPS:
1352 *(int *)result = sc->host.caps;
1353 break;
1354 case MMCBR_IVAR_MAX_DATA:
1355 *(int *)result = DWMMC_MAX_DATA;
1356 break;
1357 case MMCBR_IVAR_TIMING:
1358 *(int *)result = sc->host.ios.timing;
1359 break;
1360 }
1361 return (0);
1362 }
1363
1364 static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1366 {
1367 struct dwmmc_softc *sc;
1368
1369 sc = device_get_softc(bus);
1370
1371 switch (which) {
1372 default:
1373 return (EINVAL);
1374 case MMCBR_IVAR_BUS_MODE:
1375 sc->host.ios.bus_mode = value;
1376 break;
1377 case MMCBR_IVAR_BUS_WIDTH:
1378 sc->host.ios.bus_width = value;
1379 break;
1380 case MMCBR_IVAR_CHIP_SELECT:
1381 sc->host.ios.chip_select = value;
1382 break;
1383 case MMCBR_IVAR_CLOCK:
1384 sc->host.ios.clock = value;
1385 break;
1386 case MMCBR_IVAR_MODE:
1387 sc->host.mode = value;
1388 break;
1389 case MMCBR_IVAR_OCR:
1390 sc->host.ocr = value;
1391 break;
1392 case MMCBR_IVAR_POWER_MODE:
1393 sc->host.ios.power_mode = value;
1394 break;
1395 case MMCBR_IVAR_VDD:
1396 sc->host.ios.vdd = value;
1397 break;
1398 case MMCBR_IVAR_TIMING:
1399 sc->host.ios.timing = value;
1400 break;
1401 case MMCBR_IVAR_VCCQ:
1402 sc->host.ios.vccq = value;
1403 break;
1404 /* These are read-only */
1405 case MMCBR_IVAR_CAPS:
1406 case MMCBR_IVAR_HOST_OCR:
1407 case MMCBR_IVAR_F_MIN:
1408 case MMCBR_IVAR_F_MAX:
1409 case MMCBR_IVAR_MAX_DATA:
1410 return (EINVAL);
1411 }
1412 return (0);
1413 }
1414
1415 #ifdef MMCCAM
1416 /* Note: this function likely belongs to the specific driver impl */
1417 static int
dwmmc_switch_vccq(device_t dev, device_t child)
1419 {
1420 device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
	return (EINVAL);
1422 }
1423
1424 static int
dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1426 {
1427 struct dwmmc_softc *sc;
1428
1429 sc = device_get_softc(dev);
1430
1431 cts->host_ocr = sc->host.host_ocr;
1432 cts->host_f_min = sc->host.f_min;
1433 cts->host_f_max = sc->host.f_max;
1434 cts->host_caps = sc->host.caps;
1435 cts->host_max_data = DWMMC_MAX_DATA;
1436 memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1437
1438 return (0);
1439 }
1440
1441 static int
dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1443 {
1444 struct dwmmc_softc *sc;
1445 struct mmc_ios *ios;
1446 struct mmc_ios *new_ios;
1447 int res;
1448
1449 sc = device_get_softc(dev);
1450 ios = &sc->host.ios;
1451
1452 new_ios = &cts->ios;
1453
1454 /* Update only requested fields */
1455 if (cts->ios_valid & MMC_CLK) {
1456 ios->clock = new_ios->clock;
1457 if (bootverbose)
1458 device_printf(sc->dev, "Clock => %d\n", ios->clock);
1459 }
1460 if (cts->ios_valid & MMC_VDD) {
1461 ios->vdd = new_ios->vdd;
1462 if (bootverbose)
1463 device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1464 }
1465 if (cts->ios_valid & MMC_CS) {
1466 ios->chip_select = new_ios->chip_select;
1467 if (bootverbose)
1468 device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1469 }
1470 if (cts->ios_valid & MMC_BW) {
1471 ios->bus_width = new_ios->bus_width;
1472 if (bootverbose)
1473 device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1474 }
1475 if (cts->ios_valid & MMC_PM) {
1476 ios->power_mode = new_ios->power_mode;
1477 if (bootverbose)
1478 device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1479 }
1480 if (cts->ios_valid & MMC_BT) {
1481 ios->timing = new_ios->timing;
1482 if (bootverbose)
1483 device_printf(sc->dev, "Timing => %d\n", ios->timing);
1484 }
1485 if (cts->ios_valid & MMC_BM) {
1486 ios->bus_mode = new_ios->bus_mode;
1487 if (bootverbose)
1488 device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1489 }
1490 if (cts->ios_valid & MMC_VCCQ) {
1491 ios->vccq = new_ios->vccq;
1492 if (bootverbose)
1493 device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1494 res = dwmmc_switch_vccq(sc->dev, NULL);
1495 device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1496 }
1497
1498 return (dwmmc_update_ios(sc->dev, NULL));
1499 }
1500
1501 static int
dwmmc_cam_request(device_t dev, union ccb *ccb)
1503 {
1504 struct dwmmc_softc *sc;
1505 struct ccb_mmcio *mmcio;
1506
1507 sc = device_get_softc(dev);
1508 mmcio = &ccb->mmcio;
1509
1510 DWMMC_LOCK(sc);
1511
1512 #ifdef DEBUG
1513 if (__predict_false(bootverbose)) {
1514 device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1515 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1516 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1517 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1518 }
1519 #endif
1520 if (mmcio->cmd.data != NULL) {
1521 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1522 panic("data->len = %d, data->flags = %d -- something is b0rked",
1523 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1524 }
	if (sc->ccb != NULL) {
		device_printf(sc->dev, "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}
1529 sc->ccb = ccb;
1530 DWMMC_UNLOCK(sc);
1531 dwmmc_request(sc->dev, NULL, NULL);
1532
1533 return (0);
1534 }
1535
1536 static void
dwmmc_cam_poll(device_t dev)
1538 {
1539 struct dwmmc_softc *sc;
1540
1541 sc = device_get_softc(dev);
1542 dwmmc_intr(sc);
1543 }
1544 #endif /* MMCCAM */
1545
1546 static device_method_t dwmmc_methods[] = {
1547 /* Bus interface */
1548 DEVMETHOD(bus_read_ivar, dwmmc_read_ivar),
1549 DEVMETHOD(bus_write_ivar, dwmmc_write_ivar),
1550
1551 #ifndef MMCCAM
1552 /* mmcbr_if */
1553 DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios),
1554 DEVMETHOD(mmcbr_request, dwmmc_request),
1555 DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro),
1556 DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host),
1557 DEVMETHOD(mmcbr_release_host, dwmmc_release_host),
1558 #endif
1559
1560 #ifdef MMCCAM
1561 /* MMCCAM interface */
1562 DEVMETHOD(mmc_sim_get_tran_settings, dwmmc_get_tran_settings),
1563 DEVMETHOD(mmc_sim_set_tran_settings, dwmmc_set_tran_settings),
1564 DEVMETHOD(mmc_sim_cam_request, dwmmc_cam_request),
1565 DEVMETHOD(mmc_sim_cam_poll, dwmmc_cam_poll),
1566
1567 DEVMETHOD(bus_add_child, bus_generic_add_child),
1568 #endif
1569
1570 DEVMETHOD_END
1571 };
1572
1573 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1574 sizeof(struct dwmmc_softc));
1575