1 /*-
2 * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /*
32 * Synopsys DesignWare Mobile Storage Host Controller
33 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34 */
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/conf.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/module.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/rman.h>
47 #include <sys/queue.h>
48 #include <sys/taskqueue.h>
49
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53
54 #include <dev/fdt/fdt_common.h>
55 #include <dev/ofw/openfirm.h>
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58
59 #include <machine/bus.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
62
63 #include <dev/clk/clk.h>
64
65 #include <dev/mmc/host/dwmmc_reg.h>
66 #include <dev/mmc/host/dwmmc_var.h>
67
68 #include "opt_mmccam.h"
69
70 #ifdef MMCCAM
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_debug.h>
74 #include <cam/cam_sim.h>
75 #include <cam/cam_xpt_sim.h>
76
77 #include "mmc_sim_if.h"
78 #endif
79
80 #include "mmcbr_if.h"
81
82 #ifdef DEBUG
83 #define dprintf(fmt, args...) printf(fmt, ##args)
84 #else
#define dprintf(fmt, args...)
86 #endif
87
88 #define READ4(_sc, _reg) \
89 bus_read_4((_sc)->res[0], _reg)
90 #define WRITE4(_sc, _reg, _val) \
91 bus_write_4((_sc)->res[0], _reg, _val)
92
93 #define DIV_ROUND_UP(n, d) howmany(n, d)
94
95 #define DWMMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
96 #define DWMMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
97 #define DWMMC_LOCK_INIT(_sc) \
98 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
99 "dwmmc", MTX_DEF)
100 #define DWMMC_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
101 #define DWMMC_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
102 #define DWMMC_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
103
104 #define PENDING_CMD 0x01
105 #define PENDING_STOP 0x02
106 #define CARD_INIT_DONE 0x04
107
108 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
109 |SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
110 #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
111 |SDMMC_INTMASK_RE)
112 #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
113 |SDMMC_INTMASK_HLE)
114
115 #define DES0_DIC (1 << 1) /* Disable Interrupt on Completion */
116 #define DES0_LD (1 << 2) /* Last Descriptor */
117 #define DES0_FS (1 << 3) /* First Descriptor */
118 #define DES0_CH (1 << 4) /* second address CHained */
119 #define DES0_ER (1 << 5) /* End of Ring */
120 #define DES0_CES (1 << 30) /* Card Error Summary */
121 #define DES0_OWN (1 << 31) /* OWN */
122
123 #define DES1_BS1_MASK 0x1fff
124
125 struct idmac_desc {
126 uint32_t des0; /* control */
127 uint32_t des1; /* bufsize */
128 uint32_t des2; /* buf1 phys addr */
129 uint32_t des3; /* buf2 phys addr or next descr */
130 };
131
132 #define IDMAC_DESC_SEGS (PAGE_SIZE / (sizeof(struct idmac_desc)))
133 #define IDMAC_DESC_SIZE (sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
134 #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */
/*
 * The size field in a DMA descriptor is 13 bits long (up to 4095 bytes),
 * but must be a multiple of the data bus size. Additionally, we must ensure
 * that bus_dmamap_load() doesn't further fragment the buffer (because it
 * is processed with page-size granularity). Thus limit the fragment size to
 * half of a page.
 * XXX switch descriptor format to array and use second buffer pointer for
 * second half of page
 */
144 #define IDMAC_MAX_SIZE 2048
145 /*
146 * Busdma may bounce buffers, so we must reserve 2 descriptors
147 * (on start and on end) for bounced fragments.
148 */
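/* Maximum data transfer we advertise (MMCBR_IVAR_MAX_DATA), in MMC_SECTOR_SIZE blocks. */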
#define DWMMC_MAX_DATA	((IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE)
150
151 static void dwmmc_next_operation(struct dwmmc_softc *);
152 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
153 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
154 static int dma_stop(struct dwmmc_softc *);
155 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
156 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
157 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
158
159 static struct resource_spec dwmmc_spec[] = {
160 { SYS_RES_MEMORY, 0, RF_ACTIVE },
161 { SYS_RES_IRQ, 0, RF_ACTIVE },
162 { -1, 0 }
163 };
164
165 #define HWTYPE_MASK (0x0000ffff)
166 #define HWFLAG_MASK (0xffff << 16)
167
168 static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
170 {
171
172 if (nsegs != 1)
173 panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
174 if (error != 0)
175 panic("%s: error != 0 (%d)\n", __func__, error);
176
177 *(bus_addr_t *)arg = segs[0].ds_addr;
178 }
179
180 static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
182 {
183 struct dwmmc_softc *sc;
184 int idx;
185
186 sc = arg;
187 dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
188 if (error != 0)
189 panic("%s: error != 0 (%d)\n", __func__, error);
190
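	/*
	 * Build one chained descriptor per DMA segment: the first segment is
	 * tagged First Descriptor, the last is tagged Last Descriptor with
	 * DIC cleared so completion raises an interrupt, and OWN is set only
	 * after the write barrier so the IDMAC never sees a partially
	 * initialized descriptor.
	 */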
191 for (idx = 0; idx < nsegs; idx++) {
192 sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
193 sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
194 sc->desc_ring[idx].des2 = segs[idx].ds_addr;
195
196 if (idx == 0)
197 sc->desc_ring[idx].des0 |= DES0_FS;
198
199 if (idx == (nsegs - 1)) {
200 sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
201 sc->desc_ring[idx].des0 |= DES0_LD;
202 }
203 wmb();
204 sc->desc_ring[idx].des0 |= DES0_OWN;
205 }
206 }
207
208 static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
210 {
211 int reg;
212 int i;
213
214 reg = READ4(sc, SDMMC_CTRL);
215 reg |= (reset_bits);
216 WRITE4(sc, SDMMC_CTRL, reg);
217
	/* Wait for the reset to complete */
219 for (i = 0; i < 100; i++) {
220 if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
221 return (0);
222 DELAY(10);
223 }
224
225 device_printf(sc->dev, "Reset failed\n");
226
227 return (1);
228 }
229
230 static int
dma_setup(struct dwmmc_softc *sc)
232 {
233 int error;
234 int nidx;
235 int idx;
236
237 /*
238 * Set up TX descriptor ring, descriptors, and dma maps.
239 */
240 error = bus_dma_tag_create(
241 bus_get_dma_tag(sc->dev), /* Parent tag. */
242 4096, 0, /* alignment, boundary */
243 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
244 BUS_SPACE_MAXADDR, /* highaddr */
245 NULL, NULL, /* filter, filterarg */
246 IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */
247 IDMAC_DESC_SIZE, /* maxsegsize */
248 0, /* flags */
249 NULL, NULL, /* lockfunc, lockarg */
250 &sc->desc_tag);
251 if (error != 0) {
252 device_printf(sc->dev,
253 "could not create ring DMA tag.\n");
254 return (1);
255 }
256
257 error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
258 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
259 &sc->desc_map);
260 if (error != 0) {
261 device_printf(sc->dev,
262 "could not allocate descriptor ring.\n");
263 return (1);
264 }
265
266 error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
267 sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
268 &sc->desc_ring_paddr, 0);
269 if (error != 0) {
270 device_printf(sc->dev,
271 "could not load descriptor ring map.\n");
272 return (1);
273 }
274
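	/*
	 * Pre-link the descriptors into a ring: each des3 points at the
	 * physical address of the next descriptor, and the last descriptor
	 * wraps back to the first and carries the End-of-Ring flag.
	 */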
275 for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
276 sc->desc_ring[idx].des0 = DES0_CH;
277 sc->desc_ring[idx].des1 = 0;
278 nidx = (idx + 1) % IDMAC_DESC_SEGS;
279 sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
280 (nidx * sizeof(struct idmac_desc));
281 }
282 sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
283 sc->desc_ring[idx - 1].des0 |= DES0_ER;
284
285 error = bus_dma_tag_create(
286 bus_get_dma_tag(sc->dev), /* Parent tag. */
287 8, 0, /* alignment, boundary */
288 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
289 BUS_SPACE_MAXADDR, /* highaddr */
290 NULL, NULL, /* filter, filterarg */
291 IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */
292 IDMAC_DESC_SEGS, /* nsegments */
293 IDMAC_MAX_SIZE, /* maxsegsize */
294 0, /* flags */
295 NULL, NULL, /* lockfunc, lockarg */
296 &sc->buf_tag);
297 if (error != 0) {
298 device_printf(sc->dev,
299 "could not create ring DMA tag.\n");
300 return (1);
301 }
302
303 error = bus_dmamap_create(sc->buf_tag, 0,
304 &sc->buf_map);
305 if (error != 0) {
306 device_printf(sc->dev,
307 "could not create TX buffer DMA map.\n");
308 return (1);
309 }
310
311 return (0);
312 }
313
314 static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
316 {
317 struct mmc_command *cmd;
318 #ifdef MMCCAM
319 union ccb *ccb;
320 #endif
321
322 #ifdef MMCCAM
323 ccb = sc->ccb;
324 if (ccb == NULL)
325 return;
326 cmd = &ccb->mmcio.cmd;
327 #else
328 cmd = sc->curcmd;
329 #endif
330 if (cmd == NULL)
331 return;
332
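	/*
	 * Long (136-bit) responses arrive in RESP0..RESP3 with RESP0 holding
	 * the least significant word, while the mmc layer keeps the most
	 * significant word in resp[0], hence the reversed copy below.
	 */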
333 if (cmd->flags & MMC_RSP_PRESENT) {
334 if (cmd->flags & MMC_RSP_136) {
335 cmd->resp[3] = READ4(sc, SDMMC_RESP0);
336 cmd->resp[2] = READ4(sc, SDMMC_RESP1);
337 cmd->resp[1] = READ4(sc, SDMMC_RESP2);
338 cmd->resp[0] = READ4(sc, SDMMC_RESP3);
339 } else {
340 cmd->resp[3] = 0;
341 cmd->resp[2] = 0;
342 cmd->resp[1] = 0;
343 cmd->resp[0] = READ4(sc, SDMMC_RESP0);
344 }
345 }
346 }
347
348 static void
dwmmc_tasklet(struct dwmmc_softc *sc)
350 {
351 struct mmc_command *cmd;
352
353 cmd = sc->curcmd;
354 if (cmd == NULL)
355 return;
356
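	/*
	 * A request is finished once the command has completed and, for data
	 * transfers, the Data Transfer Over interrupt has arrived; multi-block
	 * transfers using auto-stop additionally wait for Auto Command Done.
	 */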
357 if (!sc->cmd_done)
358 return;
359
360 if (cmd->error != MMC_ERR_NONE || !cmd->data) {
361 dwmmc_next_operation(sc);
362 } else if (cmd->data && sc->dto_rcvd) {
363 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
364 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
365 sc->use_auto_stop) {
366 if (sc->acd_rcvd)
367 dwmmc_next_operation(sc);
368 } else {
369 dwmmc_next_operation(sc);
370 }
371 }
372 }
373
374 static void
dwmmc_intr(void *arg)
376 {
377 struct mmc_command *cmd;
378 struct dwmmc_softc *sc;
379 uint32_t reg;
380
381 sc = arg;
382
383 DWMMC_LOCK(sc);
384
385 cmd = sc->curcmd;
386
387 /* First handle SDMMC controller interrupts */
388 reg = READ4(sc, SDMMC_MINTSTS);
389 if (reg) {
390 dprintf("%s 0x%08x\n", __func__, reg);
391
392 if (reg & DWMMC_CMD_ERR_FLAGS) {
393 dprintf("cmd err 0x%08x cmd 0x%08x\n",
394 reg, cmd->opcode);
395 cmd->error = MMC_ERR_TIMEOUT;
396 }
397
398 if (reg & DWMMC_DATA_ERR_FLAGS) {
399 dprintf("data err 0x%08x cmd 0x%08x\n",
400 reg, cmd->opcode);
401 cmd->error = MMC_ERR_FAILED;
402 if (!sc->use_pio) {
403 dma_done(sc, cmd);
404 dma_stop(sc);
405 }
406 }
407
408 if (reg & SDMMC_INTMASK_CMD_DONE) {
409 dwmmc_cmd_done(sc);
410 sc->cmd_done = 1;
411 }
412
413 if (reg & SDMMC_INTMASK_ACD)
414 sc->acd_rcvd = 1;
415
416 if (reg & SDMMC_INTMASK_DTO)
417 sc->dto_rcvd = 1;
418
419 if (reg & SDMMC_INTMASK_CD) {
420 dwmmc_handle_card_present(sc,
			    READ4(sc, SDMMC_CDETECT) == 0);
422 }
423 }
424
425 /* Ack interrupts */
426 WRITE4(sc, SDMMC_RINTSTS, reg);
427
428 if (sc->use_pio) {
429 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
430 pio_read(sc, cmd);
431 }
432 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
433 pio_write(sc, cmd);
434 }
435 } else {
436 /* Now handle DMA interrupts */
437 reg = READ4(sc, SDMMC_IDSTS);
438 if (reg) {
439 dprintf("dma intr 0x%08x\n", reg);
440 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
441 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
442 SDMMC_IDINTEN_RI));
443 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
444 dma_done(sc, cmd);
445 }
446 }
447 }
448
449 dwmmc_tasklet(sc);
450
451 DWMMC_UNLOCK(sc);
452 }
453
454 static void
dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
456 {
457 bool was_present;
458
459 if (dumping || SCHEDULER_STOPPED())
460 return;
461
462 was_present = sc->child != NULL;
463
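	/*
	 * Debounce insertion by running the card task from a delayed
	 * (roughly half a second) timeout task; removal is handled
	 * immediately.
	 */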
464 if (!was_present && is_present) {
465 taskqueue_enqueue_timeout(taskqueue_swi_giant,
466 &sc->card_delayed_task, -(hz / 2));
467 } else if (was_present && !is_present) {
468 taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
469 }
470 }
471
472 static void
dwmmc_card_task(void *arg, int pending __unused)
474 {
475 struct dwmmc_softc *sc = arg;
476
477 #ifdef MMCCAM
478 mmc_cam_sim_discover(&sc->mmc_sim);
479 #else
480 DWMMC_LOCK(sc);
481
482 if (READ4(sc, SDMMC_CDETECT) == 0 ||
483 (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
484 if (sc->child == NULL) {
485 if (bootverbose)
486 device_printf(sc->dev, "Card inserted\n");
487
488 sc->child = device_add_child(sc->dev, "mmc", DEVICE_UNIT_ANY);
489 DWMMC_UNLOCK(sc);
490 if (sc->child) {
491 device_set_ivars(sc->child, sc);
492 (void)device_probe_and_attach(sc->child);
493 }
494 } else
495 DWMMC_UNLOCK(sc);
496 } else {
497 /* Card isn't present, detach if necessary */
498 if (sc->child != NULL) {
499 if (bootverbose)
500 device_printf(sc->dev, "Card removed\n");
501
502 DWMMC_UNLOCK(sc);
503 device_delete_child(sc->dev, sc->child);
504 sc->child = NULL;
505 } else
506 DWMMC_UNLOCK(sc);
507 }
508 #endif /* MMCCAM */
509 }
510
511 static int
parse_fdt(struct dwmmc_softc *sc)
513 {
514 pcell_t dts_value[3];
515 phandle_t node;
516 uint32_t bus_hz = 0;
517 int len;
518 int error;
519
520 if ((node = ofw_bus_get_node(sc->dev)) == -1)
521 return (ENXIO);
522
523 /* Set some defaults for freq and supported mode */
524 sc->host.f_min = 400000;
525 sc->host.f_max = 200000000;
526 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
527 sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
528 mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
529
530 /* fifo-depth */
531 if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
532 OF_getencprop(node, "fifo-depth", dts_value, len);
533 sc->fifo_depth = dts_value[0];
534 }
535
536 /* num-slots (Deprecated) */
537 sc->num_slots = 1;
538 if ((len = OF_getproplen(node, "num-slots")) > 0) {
539 device_printf(sc->dev, "num-slots property is deprecated\n");
540 OF_getencprop(node, "num-slots", dts_value, len);
541 sc->num_slots = dts_value[0];
542 }
543
544 /* clock-frequency */
545 if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
546 OF_getencprop(node, "clock-frequency", dts_value, len);
547 bus_hz = dts_value[0];
548 }
549
550 /* IP block reset is optional */
551 error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
552 if (error != 0 &&
553 error != ENOENT &&
554 error != ENODEV) {
555 device_printf(sc->dev, "Cannot get reset\n");
556 goto fail;
557 }
558
559 /* vmmc regulator is optional */
560 error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
561 &sc->vmmc);
562 if (error != 0 &&
563 error != ENOENT &&
564 error != ENODEV) {
565 device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
566 goto fail;
567 }
568
569 /* vqmmc regulator is optional */
570 error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
571 &sc->vqmmc);
572 if (error != 0 &&
573 error != ENOENT &&
574 error != ENODEV) {
575 device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
576 goto fail;
577 }
578
579 /* Assert reset first */
580 if (sc->hwreset != NULL) {
581 error = hwreset_assert(sc->hwreset);
582 if (error != 0) {
583 device_printf(sc->dev, "Cannot assert reset\n");
584 goto fail;
585 }
586 }
587
588 /* BIU (Bus Interface Unit clock) is optional */
589 error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
590 if (error != 0 &&
591 error != ENOENT &&
592 error != ENODEV) {
593 device_printf(sc->dev, "Cannot get 'biu' clock\n");
594 goto fail;
595 }
596
597 if (sc->biu) {
598 error = clk_enable(sc->biu);
599 if (error != 0) {
600 device_printf(sc->dev, "cannot enable biu clock\n");
601 goto fail;
602 }
603 }
604
605 /*
606 * CIU (Controller Interface Unit clock) is mandatory
607 * if no clock-frequency property is given
608 */
609 error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
610 if (error != 0 &&
611 error != ENOENT &&
612 error != ENODEV) {
613 device_printf(sc->dev, "Cannot get 'ciu' clock\n");
614 goto fail;
615 }
616
617 if (sc->ciu) {
618 if (bus_hz != 0) {
619 error = clk_set_freq(sc->ciu, bus_hz, 0);
620 if (error != 0)
621 device_printf(sc->dev,
622 "cannot set ciu clock to %u\n", bus_hz);
623 }
624 error = clk_enable(sc->ciu);
625 if (error != 0) {
626 device_printf(sc->dev, "cannot enable ciu clock\n");
627 goto fail;
628 }
629 clk_get_freq(sc->ciu, &sc->bus_hz);
630 }
631
632 /* Enable regulators */
633 if (sc->vmmc != NULL) {
634 error = regulator_enable(sc->vmmc);
635 if (error != 0) {
636 device_printf(sc->dev, "Cannot enable vmmc regulator\n");
637 goto fail;
638 }
639 }
640 if (sc->vqmmc != NULL) {
641 error = regulator_enable(sc->vqmmc);
642 if (error != 0) {
643 device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
644 goto fail;
645 }
646 }
647
648 /* Take dwmmc out of reset */
649 if (sc->hwreset != NULL) {
650 error = hwreset_deassert(sc->hwreset);
651 if (error != 0) {
652 device_printf(sc->dev, "Cannot deassert reset\n");
653 goto fail;
654 }
655 }
656
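	/*
	 * dwmmc_setup_bus() derives the card clock divider from bus_hz
	 * (normally the ciu clock rate), so refuse to attach without it.
	 */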
657 if (sc->bus_hz == 0) {
658 device_printf(sc->dev, "No bus speed provided\n");
659 goto fail;
660 }
661
662 return (0);
663
664 fail:
665 return (ENXIO);
666 }
667
668 int
dwmmc_attach(device_t dev)
670 {
671 struct dwmmc_softc *sc;
672 int error;
673
674 sc = device_get_softc(dev);
675
676 sc->dev = dev;
677
	/* Why not use Auto Stop? It saves hundreds of interrupts per second. */
679 sc->use_auto_stop = 1;
680
681 error = parse_fdt(sc);
682 if (error != 0) {
683 device_printf(dev, "Can't get FDT property.\n");
684 return (ENXIO);
685 }
686
687 DWMMC_LOCK_INIT(sc);
688
689 if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
690 device_printf(dev, "could not allocate resources\n");
691 return (ENXIO);
692 }
693
694 /* Setup interrupt handler. */
695 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
696 NULL, dwmmc_intr, sc, &sc->intr_cookie);
697 if (error != 0) {
698 device_printf(dev, "could not setup interrupt handler.\n");
699 return (ENXIO);
700 }
701
702 device_printf(dev, "Hardware version ID is %04x\n",
703 READ4(sc, SDMMC_VERID) & 0xffff);
704
705 /* Reset all */
706 if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
707 SDMMC_CTRL_FIFO_RESET |
708 SDMMC_CTRL_DMA_RESET)))
709 return (ENXIO);
710
711 dwmmc_setup_bus(sc, sc->host.f_min);
712
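	/*
	 * Without a fifo-depth property, infer the FIFO depth from FIFOTH,
	 * whose RX watermark field powers up as FIFO_DEPTH - 1.
	 */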
713 if (sc->fifo_depth == 0) {
714 sc->fifo_depth = 1 +
715 ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
716 device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
717 sc->fifo_depth);
718 }
719
720 if (!sc->use_pio) {
721 dma_stop(sc);
722 if (dma_setup(sc))
723 return (ENXIO);
724
725 /* Install desc base */
726 WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
727
728 /* Enable DMA interrupts */
729 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
730 WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
731 SDMMC_IDINTEN_RI |
732 SDMMC_IDINTEN_TI));
733 }
734
	/* Clear and disable interrupts for now */
736 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
737 WRITE4(sc, SDMMC_INTMASK, 0);
738
739 /* Maximum timeout */
740 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
741
742 /* Enable interrupts */
743 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
744 WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
745 SDMMC_INTMASK_DTO |
746 SDMMC_INTMASK_ACD |
747 SDMMC_INTMASK_TXDR |
748 SDMMC_INTMASK_RXDR |
749 DWMMC_ERR_FLAGS |
750 SDMMC_INTMASK_CD));
751 WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
752
753 TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
754 TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
755 dwmmc_card_task, sc);
756
757 #ifdef MMCCAM
758 sc->ccb = NULL;
759 if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
760 device_printf(dev, "cannot alloc cam sim\n");
761 dwmmc_detach(dev);
762 return (ENXIO);
763 }
764 #endif
765 /*
766 * Schedule a card detection as we won't get an interrupt
767 * if the card is inserted when we attach
768 */
769 dwmmc_card_task(sc, 0);
770 return (0);
771 }
772
773 int
dwmmc_detach(device_t dev)
775 {
776 struct dwmmc_softc *sc;
777 int ret;
778
779 sc = device_get_softc(dev);
780
781 ret = bus_generic_detach(dev);
782 if (ret != 0)
783 return (ret);
784
785 taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
786 taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
787
788 if (sc->intr_cookie != NULL) {
789 ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
790 if (ret != 0)
791 return (ret);
792 }
793 bus_release_resources(dev, dwmmc_spec, sc->res);
794
795 DWMMC_LOCK_DESTROY(sc);
796
797 if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
798 device_printf(sc->dev, "cannot deassert reset\n");
799 if (sc->biu != NULL && clk_disable(sc->biu) != 0)
800 device_printf(sc->dev, "cannot disable biu clock\n");
801 if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
802 device_printf(sc->dev, "cannot disable ciu clock\n");
803
804 if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
805 device_printf(sc->dev, "Cannot disable vmmc regulator\n");
806 if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
807 device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
808
809 #ifdef MMCCAM
810 mmc_cam_sim_free(&sc->mmc_sim);
811 #endif
812
813 return (0);
814 }
815
816 static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
818 {
819 int tout;
820 int div;
821
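	/*
	 * Clock changes take effect by issuing an update-clock-only command
	 * and polling until the controller clears the START bit; a frequency
	 * of zero simply gates the card clock off.
	 */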
822 if (freq == 0) {
823 WRITE4(sc, SDMMC_CLKENA, 0);
824 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
825 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
826
827 tout = 1000;
828 do {
829 if (tout-- < 0) {
830 device_printf(sc->dev, "Failed update clk\n");
831 return (1);
832 }
833 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
834
835 return (0);
836 }
837
838 WRITE4(sc, SDMMC_CLKENA, 0);
839 WRITE4(sc, SDMMC_CLKSRC, 0);
840
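	/*
	 * The card clock is bus_hz / (2 * CLKDIV); CLKDIV == 0 bypasses the
	 * divider, so divide (rounding up) only when the requested frequency
	 * differs from bus_hz.
	 */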
841 div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
842
843 WRITE4(sc, SDMMC_CLKDIV, div);
844 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
845 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
846
847 tout = 1000;
848 do {
849 if (tout-- < 0) {
850 device_printf(sc->dev, "Failed to update clk\n");
851 return (1);
852 }
853 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
854
855 WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
856 WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
857 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
858
859 tout = 1000;
860 do {
861 if (tout-- < 0) {
862 device_printf(sc->dev, "Failed to enable clk\n");
863 return (1);
864 }
865 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
866
867 return (0);
868 }
869
870 static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
872 {
873 struct dwmmc_softc *sc;
874 struct mmc_ios *ios;
875 uint32_t reg;
876 int ret = 0;
877
878 sc = device_get_softc(brdev);
879 ios = &sc->host.ios;
880
881 dprintf("Setting up clk %u bus_width %d, timing: %d\n",
882 ios->clock, ios->bus_width, ios->timing);
883
884 switch (ios->power_mode) {
885 case power_on:
886 break;
887 case power_off:
888 WRITE4(sc, SDMMC_PWREN, 0);
889 break;
890 case power_up:
891 WRITE4(sc, SDMMC_PWREN, 1);
892 break;
893 }
894
895 mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
896
897 if (ios->bus_width == bus_width_8)
898 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
899 else if (ios->bus_width == bus_width_4)
900 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
901 else
902 WRITE4(sc, SDMMC_CTYPE, 0);
903
904 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
905 /* XXX: take care about DDR or SDR use here */
906 WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
907 }
908
909 /* Set DDR mode */
910 reg = READ4(sc, SDMMC_UHS_REG);
911 if (ios->timing == bus_timing_uhs_ddr50 ||
912 ios->timing == bus_timing_mmc_ddr52 ||
913 ios->timing == bus_timing_mmc_hs400)
914 reg |= (SDMMC_UHS_REG_DDR);
915 else
916 reg &= ~(SDMMC_UHS_REG_DDR);
917 WRITE4(sc, SDMMC_UHS_REG, reg);
918
919 if (sc->update_ios)
920 ret = sc->update_ios(sc, ios);
921
922 dwmmc_setup_bus(sc, ios->clock);
923
924 return (ret);
925 }
926
927 static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
929 {
930 struct mmc_data *data;
931
932 data = cmd->data;
933
934 if (data->flags & MMC_DATA_WRITE)
935 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
936 BUS_DMASYNC_POSTWRITE);
937 else
938 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
939 BUS_DMASYNC_POSTREAD);
940
941 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
942 BUS_DMASYNC_POSTWRITE);
943
944 bus_dmamap_unload(sc->buf_tag, sc->buf_map);
945
946 return (0);
947 }
948
949 static int
dma_stop(struct dwmmc_softc *sc)
951 {
952 int reg;
953
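	/* Take the IDMAC out of the data path and soft-reset the DMA logic. */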
954 reg = READ4(sc, SDMMC_CTRL);
955 reg &= ~(SDMMC_CTRL_USE_IDMAC);
956 reg |= (SDMMC_CTRL_DMA_RESET);
957 WRITE4(sc, SDMMC_CTRL, reg);
958
959 reg = READ4(sc, SDMMC_BMOD);
960 reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
961 reg |= (SDMMC_BMOD_SWR);
962 WRITE4(sc, SDMMC_BMOD, reg);
963
964 return (0);
965 }
966
967 static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
969 {
970 struct mmc_data *data;
971 int err;
972 int reg;
973
974 data = cmd->data;
975
976 reg = READ4(sc, SDMMC_INTMASK);
977 reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
978 WRITE4(sc, SDMMC_INTMASK, reg);
979 dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
980 err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
981 data->data, data->len, dwmmc_ring_setup,
982 sc, BUS_DMA_NOWAIT);
983 if (err != 0)
984 panic("dmamap_load failed\n");
985
986 /* Ensure the device can see the desc */
987 bus_dmamap_sync(sc->desc_tag, sc->desc_map,
988 BUS_DMASYNC_PREWRITE);
989
990 if (data->flags & MMC_DATA_WRITE)
991 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
992 BUS_DMASYNC_PREWRITE);
993 else
994 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
995 BUS_DMASYNC_PREREAD);
996
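	/*
	 * Program the FIFO watermarks and DMA burst size (MSIZE): request
	 * service when the RX FIFO is half full or the TX FIFO half empty.
	 */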
997 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
998 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
999 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1000
1001 WRITE4(sc, SDMMC_FIFOTH, reg);
1002 wmb();
1003
1004 reg = READ4(sc, SDMMC_CTRL);
1005 reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1006 WRITE4(sc, SDMMC_CTRL, reg);
1007 wmb();
1008
1009 reg = READ4(sc, SDMMC_BMOD);
1010 reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1011 WRITE4(sc, SDMMC_BMOD, reg);
1012
1013 /* Start */
1014 WRITE4(sc, SDMMC_PLDMND, 1);
1015
1016 return (0);
1017 }
1018
1019 static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1021 {
1022 struct mmc_data *data;
1023 int reg;
1024
1025 data = cmd->data;
1026 data->xfer_len = 0;
1027
1028 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1029 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1030 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1031
1032 WRITE4(sc, SDMMC_FIFOTH, reg);
1033 wmb();
1034
1035 return (0);
1036 }
1037
1038 static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1040 {
1041 struct mmc_data *data;
1042 uint32_t *p, status;
1043
1044 if (cmd == NULL || cmd->data == NULL)
1045 return;
1046
1047 data = cmd->data;
1048 if ((data->flags & MMC_DATA_READ) == 0)
1049 return;
1050
1051 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1052 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1053
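	/*
	 * Drain the FIFO one 32-bit word at a time until it runs empty or the
	 * transfer is complete; RXDR fires again when more data is available.
	 */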
1054 while (data->xfer_len < data->len) {
1055 status = READ4(sc, SDMMC_STATUS);
1056 if (status & SDMMC_STATUS_FIFO_EMPTY)
1057 break;
1058 *p++ = READ4(sc, SDMMC_DATA);
1059 data->xfer_len += 4;
1060 }
1061
1062 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1063 }
1064
1065 static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1067 {
1068 struct mmc_data *data;
1069 uint32_t *p, status;
1070
1071 if (cmd == NULL || cmd->data == NULL)
1072 return;
1073
1074 data = cmd->data;
1075 if ((data->flags & MMC_DATA_WRITE) == 0)
1076 return;
1077
1078 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1079 p = (uint32_t *)data->data + (data->xfer_len >> 2);
1080
1081 while (data->xfer_len < data->len) {
1082 status = READ4(sc, SDMMC_STATUS);
1083 if (status & SDMMC_STATUS_FIFO_FULL)
1084 break;
1085 WRITE4(sc, SDMMC_DATA, *p++);
1086 data->xfer_len += 4;
1087 }
1088
1089 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1090 }
1091
1092 static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1094 {
1095 struct mmc_data *data;
1096 uint32_t blksz;
1097 uint32_t cmdr;
1098
1099 dprintf("%s\n", __func__);
1100 sc->curcmd = cmd;
1101 data = cmd->data;
1102
1103 #ifndef MMCCAM
1104 /* XXX Upper layers don't always set this */
1105 cmd->mrq = sc->req;
1106 #endif
1107 /* Begin setting up command register. */
1108
1109 cmdr = cmd->opcode;
1110
1111 dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1112
1113 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1114 cmd->opcode == MMC_GO_IDLE_STATE ||
1115 cmd->opcode == MMC_GO_INACTIVE_STATE)
1116 cmdr |= SDMMC_CMD_STOP_ABORT;
1117 else if (cmd->opcode != MMC_SEND_STATUS && data)
1118 cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1119
1120 /* Set up response handling. */
1121 if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1122 cmdr |= SDMMC_CMD_RESP_EXP;
1123 if (cmd->flags & MMC_RSP_136)
1124 cmdr |= SDMMC_CMD_RESP_LONG;
1125 }
1126
1127 if (cmd->flags & MMC_RSP_CRC)
1128 cmdr |= SDMMC_CMD_RESP_CRC;
1129
1130 /*
1131 * XXX: Not all platforms want this.
1132 */
1133 cmdr |= SDMMC_CMD_USE_HOLD_REG;
1134
1135 if ((sc->flags & CARD_INIT_DONE) == 0) {
1136 sc->flags |= (CARD_INIT_DONE);
1137 cmdr |= SDMMC_CMD_SEND_INIT;
1138 }
1139
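	/*
	 * For data commands: program the byte count and block size, let the
	 * controller send the stop command itself for multi-block transfers
	 * when auto-stop is enabled, and prepare either PIO or the IDMAC
	 * descriptor chain before starting.
	 */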
1140 if (data) {
1141 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1142 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1143 sc->use_auto_stop)
1144 cmdr |= SDMMC_CMD_SEND_ASTOP;
1145
1146 cmdr |= SDMMC_CMD_DATA_EXP;
1147 if (data->flags & MMC_DATA_STREAM)
1148 cmdr |= SDMMC_CMD_MODE_STREAM;
1149 if (data->flags & MMC_DATA_WRITE)
1150 cmdr |= SDMMC_CMD_DATA_WRITE;
1151
1152 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1153 #ifdef MMCCAM
1154 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1155 WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1156 WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1157 } else
1158 #endif
1159 {
1160 WRITE4(sc, SDMMC_BYTCNT, data->len);
1161 blksz = (data->len < MMC_SECTOR_SIZE) ? \
1162 data->len : MMC_SECTOR_SIZE;
1163 WRITE4(sc, SDMMC_BLKSIZ, blksz);
1164 }
1165
1166 if (sc->use_pio) {
1167 pio_prepare(sc, cmd);
1168 } else {
1169 dma_prepare(sc, cmd);
1170 }
1171 wmb();
1172 }
1173
1174 dprintf("cmdr 0x%08x\n", cmdr);
1175
1176 WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1177 wmb();
1178 WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}
1180
1181 static void
dwmmc_next_operation(struct dwmmc_softc *sc)
1183 {
1184 struct mmc_command *cmd;
1185 dprintf("%s\n", __func__);
1186 #ifdef MMCCAM
1187 union ccb *ccb;
1188
1189 ccb = sc->ccb;
1190 if (ccb == NULL)
1191 return;
1192 cmd = &ccb->mmcio.cmd;
1193 #else
1194 struct mmc_request *req;
1195
1196 req = sc->req;
1197 if (req == NULL)
1198 return;
1199 cmd = req->cmd;
1200 #endif
1201
1202 sc->acd_rcvd = 0;
1203 sc->dto_rcvd = 0;
1204 sc->cmd_done = 0;
1205
	/*
	 * XXX: Wait while the card is still busy.
	 * We need this to prevent data timeouts,
	 * mostly caused by a multi-block write command
	 * followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;
1214
1215 if (sc->flags & PENDING_CMD) {
1216 sc->flags &= ~PENDING_CMD;
1217 dwmmc_start_cmd(sc, cmd);
1218 return;
1219 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1220 sc->flags &= ~PENDING_STOP;
1221 /// XXX: What to do with this?
1222 //dwmmc_start_cmd(sc, req->stop);
1223 return;
1224 }
1225
1226 #ifdef MMCCAM
1227 sc->ccb = NULL;
1228 sc->curcmd = NULL;
1229 ccb->ccb_h.status =
1230 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1231 xpt_done(ccb);
1232 #else
1233 sc->req = NULL;
1234 sc->curcmd = NULL;
1235 req->done(req);
1236 #endif
1237 }
1238
1239 static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1241 {
1242 struct dwmmc_softc *sc;
1243
1244 sc = device_get_softc(brdev);
1245
1246 dprintf("%s\n", __func__);
1247
1248 DWMMC_LOCK(sc);
1249
1250 #ifdef MMCCAM
1251 sc->flags |= PENDING_CMD;
1252 #else
1253 if (sc->req != NULL) {
1254 DWMMC_UNLOCK(sc);
1255 return (EBUSY);
1256 }
1257
1258 sc->req = req;
1259 sc->flags |= PENDING_CMD;
1260 if (sc->req->stop)
1261 sc->flags |= PENDING_STOP;
1262 #endif
1263 dwmmc_next_operation(sc);
1264
1265 DWMMC_UNLOCK(sc);
1266 return (0);
1267 }
1268
1269 #ifndef MMCCAM
1270 static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
1272 {
1273
1274 dprintf("%s\n", __func__);
1275
1276 return (0);
1277 }
1278
1279 static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
1281 {
1282 struct dwmmc_softc *sc;
1283
1284 sc = device_get_softc(brdev);
1285
1286 DWMMC_LOCK(sc);
1287 while (sc->bus_busy)
1288 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1289 sc->bus_busy++;
1290 DWMMC_UNLOCK(sc);
1291 return (0);
1292 }
1293
1294 static int
dwmmc_release_host(device_t brdev, device_t reqdev)
1296 {
1297 struct dwmmc_softc *sc;
1298
1299 sc = device_get_softc(brdev);
1300
1301 DWMMC_LOCK(sc);
1302 sc->bus_busy--;
1303 wakeup(sc);
1304 DWMMC_UNLOCK(sc);
1305 return (0);
1306 }
1307 #endif /* !MMCCAM */
1308
1309 static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1311 {
1312 struct dwmmc_softc *sc;
1313
1314 sc = device_get_softc(bus);
1315
1316 switch (which) {
1317 default:
1318 return (EINVAL);
1319 case MMCBR_IVAR_BUS_MODE:
1320 *(int *)result = sc->host.ios.bus_mode;
1321 break;
1322 case MMCBR_IVAR_BUS_WIDTH:
1323 *(int *)result = sc->host.ios.bus_width;
1324 break;
1325 case MMCBR_IVAR_CHIP_SELECT:
1326 *(int *)result = sc->host.ios.chip_select;
1327 break;
1328 case MMCBR_IVAR_CLOCK:
1329 *(int *)result = sc->host.ios.clock;
1330 break;
1331 case MMCBR_IVAR_F_MIN:
1332 *(int *)result = sc->host.f_min;
1333 break;
1334 case MMCBR_IVAR_F_MAX:
1335 *(int *)result = sc->host.f_max;
1336 break;
1337 case MMCBR_IVAR_HOST_OCR:
1338 *(int *)result = sc->host.host_ocr;
1339 break;
1340 case MMCBR_IVAR_MODE:
1341 *(int *)result = sc->host.mode;
1342 break;
1343 case MMCBR_IVAR_OCR:
1344 *(int *)result = sc->host.ocr;
1345 break;
1346 case MMCBR_IVAR_POWER_MODE:
1347 *(int *)result = sc->host.ios.power_mode;
1348 break;
1349 case MMCBR_IVAR_VDD:
1350 *(int *)result = sc->host.ios.vdd;
1351 break;
1352 case MMCBR_IVAR_VCCQ:
1353 *(int *)result = sc->host.ios.vccq;
1354 break;
1355 case MMCBR_IVAR_CAPS:
1356 *(int *)result = sc->host.caps;
1357 break;
1358 case MMCBR_IVAR_MAX_DATA:
1359 *(int *)result = DWMMC_MAX_DATA;
1360 break;
1361 case MMCBR_IVAR_TIMING:
1362 *(int *)result = sc->host.ios.timing;
1363 break;
1364 }
1365 return (0);
1366 }
1367
1368 static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1370 {
1371 struct dwmmc_softc *sc;
1372
1373 sc = device_get_softc(bus);
1374
1375 switch (which) {
1376 default:
1377 return (EINVAL);
1378 case MMCBR_IVAR_BUS_MODE:
1379 sc->host.ios.bus_mode = value;
1380 break;
1381 case MMCBR_IVAR_BUS_WIDTH:
1382 sc->host.ios.bus_width = value;
1383 break;
1384 case MMCBR_IVAR_CHIP_SELECT:
1385 sc->host.ios.chip_select = value;
1386 break;
1387 case MMCBR_IVAR_CLOCK:
1388 sc->host.ios.clock = value;
1389 break;
1390 case MMCBR_IVAR_MODE:
1391 sc->host.mode = value;
1392 break;
1393 case MMCBR_IVAR_OCR:
1394 sc->host.ocr = value;
1395 break;
1396 case MMCBR_IVAR_POWER_MODE:
1397 sc->host.ios.power_mode = value;
1398 break;
1399 case MMCBR_IVAR_VDD:
1400 sc->host.ios.vdd = value;
1401 break;
1402 case MMCBR_IVAR_TIMING:
1403 sc->host.ios.timing = value;
1404 break;
1405 case MMCBR_IVAR_VCCQ:
1406 sc->host.ios.vccq = value;
1407 break;
1408 /* These are read-only */
1409 case MMCBR_IVAR_CAPS:
1410 case MMCBR_IVAR_HOST_OCR:
1411 case MMCBR_IVAR_F_MIN:
1412 case MMCBR_IVAR_F_MAX:
1413 case MMCBR_IVAR_MAX_DATA:
1414 return (EINVAL);
1415 }
1416 return (0);
1417 }
1418
1419 #ifdef MMCCAM
1420 /* Note: this function likely belongs to the specific driver impl */
1421 static int
dwmmc_switch_vccq(device_t dev, device_t child)
1423 {
1424 device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
	return (EINVAL);
1426 }
1427
1428 static int
dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1430 {
1431 struct dwmmc_softc *sc;
1432
1433 sc = device_get_softc(dev);
1434
1435 cts->host_ocr = sc->host.host_ocr;
1436 cts->host_f_min = sc->host.f_min;
1437 cts->host_f_max = sc->host.f_max;
1438 cts->host_caps = sc->host.caps;
1439 cts->host_max_data = DWMMC_MAX_DATA;
1440 memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1441
1442 return (0);
1443 }
1444
1445 static int
dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1447 {
1448 struct dwmmc_softc *sc;
1449 struct mmc_ios *ios;
1450 struct mmc_ios *new_ios;
1451 int res;
1452
1453 sc = device_get_softc(dev);
1454 ios = &sc->host.ios;
1455
1456 new_ios = &cts->ios;
1457
1458 /* Update only requested fields */
1459 if (cts->ios_valid & MMC_CLK) {
1460 ios->clock = new_ios->clock;
1461 if (bootverbose)
1462 device_printf(sc->dev, "Clock => %d\n", ios->clock);
1463 }
1464 if (cts->ios_valid & MMC_VDD) {
1465 ios->vdd = new_ios->vdd;
1466 if (bootverbose)
1467 device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1468 }
1469 if (cts->ios_valid & MMC_CS) {
1470 ios->chip_select = new_ios->chip_select;
1471 if (bootverbose)
1472 device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1473 }
1474 if (cts->ios_valid & MMC_BW) {
1475 ios->bus_width = new_ios->bus_width;
1476 if (bootverbose)
1477 device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1478 }
1479 if (cts->ios_valid & MMC_PM) {
1480 ios->power_mode = new_ios->power_mode;
1481 if (bootverbose)
1482 device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1483 }
1484 if (cts->ios_valid & MMC_BT) {
1485 ios->timing = new_ios->timing;
1486 if (bootverbose)
1487 device_printf(sc->dev, "Timing => %d\n", ios->timing);
1488 }
1489 if (cts->ios_valid & MMC_BM) {
1490 ios->bus_mode = new_ios->bus_mode;
1491 if (bootverbose)
1492 device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1493 }
1494 if (cts->ios_valid & MMC_VCCQ) {
1495 ios->vccq = new_ios->vccq;
1496 if (bootverbose)
1497 device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1498 res = dwmmc_switch_vccq(sc->dev, NULL);
1499 device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1500 }
1501
1502 return (dwmmc_update_ios(sc->dev, NULL));
1503 }
1504
1505 static int
dwmmc_cam_request(device_t dev, union ccb *ccb)
1507 {
1508 struct dwmmc_softc *sc;
1509 struct ccb_mmcio *mmcio;
1510
1511 sc = device_get_softc(dev);
1512 mmcio = &ccb->mmcio;
1513
1514 DWMMC_LOCK(sc);
1515
1516 #ifdef DEBUG
1517 if (__predict_false(bootverbose)) {
1518 device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1519 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1520 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1521 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1522 }
1523 #endif
1524 if (mmcio->cmd.data != NULL) {
1525 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1526 panic("data->len = %d, data->flags = %d -- something is b0rked",
1527 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1528 }
	if (sc->ccb != NULL) {
		device_printf(sc->dev, "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}
1533 sc->ccb = ccb;
1534 DWMMC_UNLOCK(sc);
1535 dwmmc_request(sc->dev, NULL, NULL);
1536
1537 return (0);
1538 }
1539
1540 static void
dwmmc_cam_poll(device_t dev)
1542 {
1543 struct dwmmc_softc *sc;
1544
1545 sc = device_get_softc(dev);
1546 dwmmc_intr(sc);
1547 }
1548 #endif /* MMCCAM */
1549
1550 static device_method_t dwmmc_methods[] = {
1551 /* Bus interface */
1552 DEVMETHOD(bus_read_ivar, dwmmc_read_ivar),
1553 DEVMETHOD(bus_write_ivar, dwmmc_write_ivar),
1554
1555 #ifndef MMCCAM
1556 /* mmcbr_if */
1557 DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios),
1558 DEVMETHOD(mmcbr_request, dwmmc_request),
1559 DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro),
1560 DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host),
1561 DEVMETHOD(mmcbr_release_host, dwmmc_release_host),
1562 #endif
1563
1564 #ifdef MMCCAM
1565 /* MMCCAM interface */
1566 DEVMETHOD(mmc_sim_get_tran_settings, dwmmc_get_tran_settings),
1567 DEVMETHOD(mmc_sim_set_tran_settings, dwmmc_set_tran_settings),
1568 DEVMETHOD(mmc_sim_cam_request, dwmmc_cam_request),
1569 DEVMETHOD(mmc_sim_cam_poll, dwmmc_cam_poll),
1570
1571 DEVMETHOD(bus_add_child, bus_generic_add_child),
1572 #endif
1573
1574 DEVMETHOD_END
1575 };
1576
1577 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1578 sizeof(struct dwmmc_softc));
1579