xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 396c556d77189a5c474d35cec6f44a762e310b7d)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/malloc.h>
45 #include <sys/rman.h>
46 
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
49 
50 #include <dev/fdt/fdt_common.h>
51 #include <dev/ofw/openfirm.h>
52 #include <dev/ofw/ofw_bus.h>
53 #include <dev/ofw/ofw_bus_subr.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 
59 #include <dev/mmc/host/dwmmc_reg.h>
60 #include <dev/mmc/host/dwmmc_var.h>
61 
62 #include "opt_mmccam.h"
63 
64 #include "mmcbr_if.h"
65 
66 #define dprintf(x, arg...)
67 
68 #define	READ4(_sc, _reg) \
69 	bus_read_4((_sc)->res[0], _reg)
70 #define	WRITE4(_sc, _reg, _val) \
71 	bus_write_4((_sc)->res[0], _reg, _val)
72 
73 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
74 
75 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
76 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
77 #define	DWMMC_LOCK_INIT(_sc) \
78 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
79 	    "dwmmc", MTX_DEF)
80 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx)
81 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED)
82 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED)
83 
84 #define	PENDING_CMD	0x01
85 #define	PENDING_STOP	0x02
86 #define	CARD_INIT_DONE	0x04
87 
88 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
89 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
90 				|SDMMC_INTMASK_EBE)
91 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
92 				|SDMMC_INTMASK_RE)
93 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
94 				|SDMMC_INTMASK_HLE)
95 
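/*
 * IDMAC (internal DMA controller) descriptor word 0 control bits,
 * following the DesignWare MSHC chained-descriptor format: DIC disables
 * the per-descriptor completion interrupt, LD/FS mark the last/first
 * segment of a transfer, CH selects chained mode, ER marks the end of
 * the ring, CES is the card error summary, and OWN hands the descriptor
 * to the DMA engine.
 */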
96 #define	DES0_DIC	(1 << 1)
97 #define	DES0_LD		(1 << 2)
98 #define	DES0_FS		(1 << 3)
99 #define	DES0_CH		(1 << 4)
100 #define	DES0_ER		(1 << 5)
101 #define	DES0_CES	(1 << 30)
102 #define	DES0_OWN	(1 << 31)
103 
104 #define	DES1_BS1_MASK	0xfff
105 #define	DES1_BS1_SHIFT	0
106 
107 struct idmac_desc {
108 	uint32_t	des0;	/* control */
109 	uint32_t	des1;	/* bufsize */
110 	uint32_t	des2;	/* buf1 phys addr */
111 	uint32_t	des3;	/* buf2 phys addr or next descr */
112 };
113 
114 #define	DESC_MAX	256
115 #define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
116 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
117 
118 static void dwmmc_next_operation(struct dwmmc_softc *);
119 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
120 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
121 static int dma_stop(struct dwmmc_softc *);
122 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
123 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
124 
125 static struct resource_spec dwmmc_spec[] = {
126 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
127 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
128 	{ -1, 0 }
129 };
130 
131 #define	HWTYPE_MASK		(0x0000ffff)
132 #define	HWFLAG_MASK		(0xffff << 16)
133 
134 static struct ofw_compat_data compat_data[] = {
135 	{"altr,socfpga-dw-mshc",	HWTYPE_ALTERA},
136 	{"samsung,exynos5420-dw-mshc",	HWTYPE_EXYNOS},
137 	{"rockchip,rk2928-dw-mshc",	HWTYPE_ROCKCHIP},
138 	{NULL,				HWTYPE_NONE},
139 };
140 
141 static void
142 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
143 {
144 
145 	if (error != 0)
146 		return;
147 	*(bus_addr_t *)arg = segs[0].ds_addr;
148 }
149 
150 static void
151 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
152 {
153 	struct dwmmc_softc *sc;
154 	int idx;
155 
156 	if (error != 0)
157 		return;
158 
159 	sc = arg;
160 
161 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
162 
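	/*
	 * Build one chained descriptor per DMA segment: hand each to the
	 * IDMAC (OWN), mark the first segment FS and the last LD, and
	 * leave the completion interrupt enabled only on the last one.
	 */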
163 	for (idx = 0; idx < nsegs; idx++) {
164 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
165 		sc->desc_ring[idx].des1 = segs[idx].ds_len;
166 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
167 
168 		if (idx == 0)
169 			sc->desc_ring[idx].des0 |= DES0_FS;
170 
171 		if (idx == (nsegs - 1)) {
172 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
173 			sc->desc_ring[idx].des0 |= DES0_LD;
174 		}
175 	}
176 }
177 
178 static int
179 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
180 {
181 	int reg;
182 	int i;
183 
184 	reg = READ4(sc, SDMMC_CTRL);
185 	reg |= (reset_bits);
186 	WRITE4(sc, SDMMC_CTRL, reg);
187 
188 	/* Wait for the reset to complete */
189 	for (i = 0; i < 100; i++) {
190 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
191 			return (0);
192 		DELAY(10);
193 	}
194 
195 	device_printf(sc->dev, "Reset failed\n");
196 
197 	return (1);
198 }
199 
200 static int
201 dma_setup(struct dwmmc_softc *sc)
202 {
203 	int error;
204 	int nidx;
205 	int idx;
206 
207 	/*
208 	 * Set up the descriptor ring, descriptors, and DMA maps.
209 	 */
210 	error = bus_dma_tag_create(
211 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
212 	    4096, 0,			/* alignment, boundary */
213 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
214 	    BUS_SPACE_MAXADDR,		/* highaddr */
215 	    NULL, NULL,			/* filter, filterarg */
216 	    DESC_SIZE, 1, 		/* maxsize, nsegments */
217 	    DESC_SIZE,			/* maxsegsize */
218 	    0,				/* flags */
219 	    NULL, NULL,			/* lockfunc, lockarg */
220 	    &sc->desc_tag);
221 	if (error != 0) {
222 		device_printf(sc->dev,
223 		    "could not create ring DMA tag.\n");
224 		return (1);
225 	}
226 
227 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
228 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
229 	    &sc->desc_map);
230 	if (error != 0) {
231 		device_printf(sc->dev,
232 		    "could not allocate descriptor ring.\n");
233 		return (1);
234 	}
235 
236 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
237 	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
238 	    &sc->desc_ring_paddr, 0);
239 	if (error != 0) {
240 		device_printf(sc->dev,
241 		    "could not load descriptor ring map.\n");
242 		return (1);
243 	}
244 
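	/*
	 * Link the descriptors into a ring: each des3 holds the physical
	 * address of the next descriptor, with the last one wrapping back
	 * to the first.
	 */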
245 	for (idx = 0; idx < sc->desc_count; idx++) {
246 		sc->desc_ring[idx].des0 = DES0_CH;
247 		sc->desc_ring[idx].des1 = 0;
248 		nidx = (idx + 1) % sc->desc_count;
249 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
250 		    (nidx * sizeof(struct idmac_desc));
251 	}
252 
253 	error = bus_dma_tag_create(
254 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
255 	    4096, 0,			/* alignment, boundary */
256 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
257 	    BUS_SPACE_MAXADDR,		/* highaddr */
258 	    NULL, NULL,			/* filter, filterarg */
259 	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
260 	    sc->desc_count,		/* nsegments */
261 	    MMC_SECTOR_SIZE,		/* maxsegsize */
262 	    0,				/* flags */
263 	    NULL, NULL,			/* lockfunc, lockarg */
264 	    &sc->buf_tag);
265 	if (error != 0) {
266 		device_printf(sc->dev,
267 		    "could not create buffer DMA tag.\n");
268 		return (1);
269 	}
270 
271 	error = bus_dmamap_create(sc->buf_tag, 0,
272 	    &sc->buf_map);
273 	if (error != 0) {
274 		device_printf(sc->dev,
275 		    "could not create buffer DMA map.\n");
276 		return (1);
277 	}
278 
279 	return (0);
280 }
281 
282 static void
283 dwmmc_cmd_done(struct dwmmc_softc *sc)
284 {
285 	struct mmc_command *cmd;
286 
287 	cmd = sc->curcmd;
288 	if (cmd == NULL)
289 		return;
290 
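	/*
	 * The controller returns the least significant response word in
	 * RESP0, while the mmc layer expects resp[0] to hold the most
	 * significant word, so long responses are copied in reverse order.
	 */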
291 	if (cmd->flags & MMC_RSP_PRESENT) {
292 		if (cmd->flags & MMC_RSP_136) {
293 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
294 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
295 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
296 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
297 		} else {
298 			cmd->resp[3] = 0;
299 			cmd->resp[2] = 0;
300 			cmd->resp[1] = 0;
301 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
302 		}
303 	}
304 }
305 
306 static void
307 dwmmc_tasklet(struct dwmmc_softc *sc)
308 {
309 	struct mmc_command *cmd;
310 
311 	cmd = sc->curcmd;
312 	if (cmd == NULL)
313 		return;
314 
315 	if (!sc->cmd_done)
316 		return;
317 
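	/*
	 * A failed command, or one without data, completes as soon as the
	 * command phase is done.  Data commands also wait for the
	 * data-transfer-over interrupt, and multi-block transfers issued
	 * with Auto Stop additionally wait for auto-command-done.
	 */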
318 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
319 		dwmmc_next_operation(sc);
320 	} else if (cmd->data && sc->dto_rcvd) {
321 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
322 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
323 		     sc->use_auto_stop) {
324 			if (sc->acd_rcvd)
325 				dwmmc_next_operation(sc);
326 		} else {
327 			dwmmc_next_operation(sc);
328 		}
329 	}
330 }
331 
332 static void
333 dwmmc_intr(void *arg)
334 {
335 	struct mmc_command *cmd;
336 	struct dwmmc_softc *sc;
337 	uint32_t reg;
338 
339 	sc = arg;
340 
341 	DWMMC_LOCK(sc);
342 
343 	cmd = sc->curcmd;
344 
345 	/* First handle SDMMC controller interrupts */
346 	reg = READ4(sc, SDMMC_MINTSTS);
347 	if (reg) {
348 		dprintf("%s 0x%08x\n", __func__, reg);
349 
350 		if (reg & DWMMC_CMD_ERR_FLAGS) {
351 			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
352 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
353 				reg, cmd->opcode);
354 			cmd->error = MMC_ERR_TIMEOUT;
355 		}
356 
357 		if (reg & DWMMC_DATA_ERR_FLAGS) {
358 			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
359 			dprintf("data err 0x%08x cmd 0x%08x\n",
360 				reg, cmd->opcode);
361 			cmd->error = MMC_ERR_FAILED;
362 			if (!sc->use_pio) {
363 				dma_done(sc, cmd);
364 				dma_stop(sc);
365 			}
366 		}
367 
368 		if (reg & SDMMC_INTMASK_CMD_DONE) {
369 			dwmmc_cmd_done(sc);
370 			sc->cmd_done = 1;
371 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
372 		}
373 
374 		if (reg & SDMMC_INTMASK_ACD) {
375 			sc->acd_rcvd = 1;
376 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
377 		}
378 
379 		if (reg & SDMMC_INTMASK_DTO) {
380 			sc->dto_rcvd = 1;
381 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
382 		}
383 
384 		if (reg & SDMMC_INTMASK_CD) {
385 			/* XXX: Handle card detect */
386 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
387 		}
388 	}
389 
390 	if (sc->use_pio) {
391 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
392 			pio_read(sc, cmd);
393 		}
394 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
395 			pio_write(sc, cmd);
396 		}
397 	} else {
398 		/* Now handle DMA interrupts */
399 		reg = READ4(sc, SDMMC_IDSTS);
400 		if (reg) {
401 			dprintf("dma intr 0x%08x\n", reg);
402 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
403 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
404 							 SDMMC_IDINTEN_RI));
405 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
406 				dma_done(sc, cmd);
407 			}
408 		}
409 	}
410 
411 	dwmmc_tasklet(sc);
412 
413 	DWMMC_UNLOCK(sc);
414 }
415 
416 static int
417 parse_fdt(struct dwmmc_softc *sc)
418 {
419 	pcell_t dts_value[3];
420 	phandle_t node;
421 	int len;
422 
423 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
424 		return (ENXIO);
425 
426 	/* fifo-depth */
427 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
428 		OF_getencprop(node, "fifo-depth", dts_value, len);
429 		sc->fifo_depth = dts_value[0];
430 	}
431 
432 	/* num-slots */
433 	sc->num_slots = 1;
434 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
435 		OF_getencprop(node, "num-slots", dts_value, len);
436 		sc->num_slots = dts_value[0];
437 	}
438 
439 	/*
440 	 * We need platform-specific code to know what clock is
441 	 * supplied to our device.  For now rely on the value
442 	 * specified in the FDT.
443 	 */
444 	if (sc->bus_hz == 0) {
445 		if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
446 			return (ENXIO);
447 		OF_getencprop(node, "bus-frequency", dts_value, len);
448 		sc->bus_hz = dts_value[0];
449 	}
450 
451 	/*
452 	 * Platform-specific setup (Exynos clock divider and timing).
453 	 * XXX: Move to a separate file.
454 	 */
455 
456 	if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
457 		return (0);
458 
459 	if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
460 		return (ENXIO);
461 	OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
462 	sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
463 	sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
464 
465 	if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
466 		return (ENXIO);
467 	OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
468 	sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
469 			  (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
470 
471 	if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
472 		return (ENXIO);
473 	OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
474 	sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
475 			  (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
476 
477 	return (0);
478 }
479 
480 static int
481 dwmmc_probe(device_t dev)
482 {
483 	uintptr_t hwtype;
484 
485 	if (!ofw_bus_status_okay(dev))
486 		return (ENXIO);
487 
488 	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
489 	if (hwtype == HWTYPE_NONE)
490 		return (ENXIO);
491 
492 	device_set_desc(dev, "Synopsys DesignWare Mobile "
493 				"Storage Host Controller");
494 	return (BUS_PROBE_DEFAULT);
495 }
496 
497 int
498 dwmmc_attach(device_t dev)
499 {
500 	struct dwmmc_softc *sc;
501 	int error;
502 	int slot;
503 
504 	sc = device_get_softc(dev);
505 
506 	sc->dev = dev;
507 	if (sc->hwtype == HWTYPE_NONE) {
508 		sc->hwtype =
509 		    ofw_bus_search_compatible(dev, compat_data)->ocd_data;
510 	}
511 
512 	/* Use Auto Stop: it saves on the order of a hundred interrupts per second */
513 	sc->use_auto_stop = 1;
514 
515 	error = parse_fdt(sc);
516 	if (error != 0) {
517 		device_printf(dev, "Can't get FDT property.\n");
518 		return (ENXIO);
519 	}
520 
521 	DWMMC_LOCK_INIT(sc);
522 
523 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
524 		device_printf(dev, "could not allocate resources\n");
525 		return (ENXIO);
526 	}
527 
528 	/* Setup interrupt handler. */
529 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
530 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
531 	if (error != 0) {
532 		device_printf(dev, "could not setup interrupt handler.\n");
533 		return (ENXIO);
534 	}
535 
536 	device_printf(dev, "Hardware version ID is %04x\n",
537 		READ4(sc, SDMMC_VERID) & 0xffff);
538 
539 	if (sc->desc_count == 0)
540 		sc->desc_count = DESC_MAX;
541 
542 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
543 		sc->use_pio = 1;
544 		sc->pwren_inverted = 1;
545 	} else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
546 		WRITE4(sc, EMMCP_MPSBEGIN0, 0);
547 		WRITE4(sc, EMMCP_SEND0, 0);
548 		WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
549 					 MPSCTRL_SECURE_WRITE_BIT |
550 					 MPSCTRL_NON_SECURE_READ_BIT |
551 					 MPSCTRL_NON_SECURE_WRITE_BIT |
552 					 MPSCTRL_VALID));
553 	}
554 
555 	/* XXX: we support operation for slot index 0 only */
556 	slot = 0;
557 	if (sc->pwren_inverted) {
558 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
559 	} else {
560 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
561 	}
562 
563 	/* Reset all */
564 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
565 				  SDMMC_CTRL_FIFO_RESET |
566 				  SDMMC_CTRL_DMA_RESET)))
567 		return (ENXIO);
568 
569 	dwmmc_setup_bus(sc, sc->host.f_min);
570 
571 	if (sc->fifo_depth == 0) {
572 		sc->fifo_depth = 1 +
573 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
574 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
575 		    sc->fifo_depth);
576 	}
577 
578 	if (!sc->use_pio) {
579 		if (dma_setup(sc))
580 			return (ENXIO);
581 
582 		/* Program the descriptor ring base address */
583 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
584 
585 		/* Enable DMA interrupts */
586 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
587 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
588 					   SDMMC_IDINTEN_RI |
589 					   SDMMC_IDINTEN_TI));
590 	}
591 
592 	/* Clear and disable interrupts for now */
593 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
594 	WRITE4(sc, SDMMC_INTMASK, 0);
595 
596 	/* Maximum timeout */
597 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
598 
599 	/* Enable interrupts */
600 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
601 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
602 				   SDMMC_INTMASK_DTO |
603 				   SDMMC_INTMASK_ACD |
604 				   SDMMC_INTMASK_TXDR |
605 				   SDMMC_INTMASK_RXDR |
606 				   DWMMC_ERR_FLAGS |
607 				   SDMMC_INTMASK_CD));
608 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
609 
610 	sc->host.f_min = 400000;
611 	sc->host.f_max = min(200000000, sc->bus_hz);
612 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
613 	sc->host.caps = MMC_CAP_4_BIT_DATA;
614 
615 	device_add_child(dev, "mmc", -1);
616 	return (bus_generic_attach(dev));
617 }
618 
619 static int
620 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
621 {
622 	int tout;
623 	int div;
624 
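	/*
	 * A frequency of zero gates the card clock off.  Every clock
	 * register change is handed to the card interface unit with an
	 * "update clock registers only" command, and we poll until the
	 * controller clears the start bit.
	 */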
625 	if (freq == 0) {
626 		WRITE4(sc, SDMMC_CLKENA, 0);
627 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
628 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
629 
630 		tout = 1000;
631 		do {
632 			if (tout-- < 0) {
633 				device_printf(sc->dev, "Failed to update clk\n");
634 				return (1);
635 			}
636 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
637 
638 		return (0);
639 	}
640 
641 	WRITE4(sc, SDMMC_CLKENA, 0);
642 	WRITE4(sc, SDMMC_CLKSRC, 0);
643 
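	/* The card clock is bus_hz / (2 * CLKDIV); a divider of 0 bypasses it. */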
644 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
645 
646 	WRITE4(sc, SDMMC_CLKDIV, div);
647 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
648 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
649 
650 	tout = 1000;
651 	do {
652 		if (tout-- < 0) {
653 			device_printf(sc->dev, "Failed to update clk\n");
654 			return (1);
655 		}
656 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
657 
658 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
659 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
660 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
661 
662 	tout = 1000;
663 	do {
664 		if (tout-- < 0) {
665 			device_printf(sc->dev, "Failed to enable clk\n");
666 			return (1);
667 		}
668 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
669 
670 	return (0);
671 }
672 
673 static int
674 dwmmc_update_ios(device_t brdev, device_t reqdev)
675 {
676 	struct dwmmc_softc *sc;
677 	struct mmc_ios *ios;
678 
679 	sc = device_get_softc(brdev);
680 	ios = &sc->host.ios;
681 
682 	dprintf("Setting up clk %u bus_width %d\n",
683 		ios->clock, ios->bus_width);
684 
685 	dwmmc_setup_bus(sc, ios->clock);
686 
687 	if (ios->bus_width == bus_width_8)
688 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
689 	else if (ios->bus_width == bus_width_4)
690 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
691 	else
692 		WRITE4(sc, SDMMC_CTYPE, 0);
693 
694 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
695 		/* XXX: take care about DDR or SDR use here */
696 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
697 	}
698 
699 	/*
700 	 * XXX: take care about DDR bit
701 	 *
702 	 * reg = READ4(sc, SDMMC_UHS_REG);
703 	 * reg |= (SDMMC_UHS_REG_DDR);
704 	 * WRITE4(sc, SDMMC_UHS_REG, reg);
705 	 */
706 
707 	return (0);
708 }
709 
710 static int
711 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
712 {
713 	struct mmc_data *data;
714 
715 	data = cmd->data;
716 
717 	if (data->flags & MMC_DATA_WRITE)
718 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
719 			BUS_DMASYNC_POSTWRITE);
720 	else
721 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
722 			BUS_DMASYNC_POSTREAD);
723 
724 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
725 	    BUS_DMASYNC_POSTWRITE);
726 
727 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
728 
729 	return (0);
730 }
731 
732 static int
733 dma_stop(struct dwmmc_softc *sc)
734 {
735 	int reg;
736 
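	/*
	 * Take the controller out of IDMAC mode and reset both the host
	 * DMA interface (CTRL) and the internal DMA controller (BMOD
	 * software reset).
	 */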
737 	reg = READ4(sc, SDMMC_CTRL);
738 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
739 	reg |= (SDMMC_CTRL_DMA_RESET);
740 	WRITE4(sc, SDMMC_CTRL, reg);
741 
742 	reg = READ4(sc, SDMMC_BMOD);
743 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
744 	reg |= (SDMMC_BMOD_SWR);
745 	WRITE4(sc, SDMMC_BMOD, reg);
746 
747 	return (0);
748 }
749 
750 static int
751 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
752 {
753 	struct mmc_data *data;
754 	int len;
755 	int err;
756 	int reg;
757 
758 	data = cmd->data;
759 	len = data->len;
760 
761 	reg = READ4(sc, SDMMC_INTMASK);
762 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
763 	WRITE4(sc, SDMMC_INTMASK, reg);
764 
765 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
766 		data->data, data->len, dwmmc_ring_setup,
767 		sc, BUS_DMA_NOWAIT);
768 	if (err != 0)
769 		panic("dmamap_load failed\n");
770 
771 	/* Ensure the device can see the desc */
772 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
773 	    BUS_DMASYNC_PREWRITE);
774 
775 	if (data->flags & MMC_DATA_WRITE)
776 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
777 			BUS_DMASYNC_PREWRITE);
778 	else
779 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
780 			BUS_DMASYNC_PREREAD);
781 
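	/*
	 * Program the FIFO thresholds: the DMA multiple-transaction burst
	 * size, an RX watermark of half the FIFO depth minus one, and a
	 * TX watermark of half the FIFO depth.
	 */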
782 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
783 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
784 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
785 
786 	WRITE4(sc, SDMMC_FIFOTH, reg);
787 	wmb();
788 
789 	reg = READ4(sc, SDMMC_CTRL);
790 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
791 	WRITE4(sc, SDMMC_CTRL, reg);
792 	wmb();
793 
794 	reg = READ4(sc, SDMMC_BMOD);
795 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
796 	WRITE4(sc, SDMMC_BMOD, reg);
797 
798 	/* Poke the poll demand register to start the IDMAC transfer */
799 	WRITE4(sc, SDMMC_PLDMND, 1);
800 
801 	return (0);
802 }
803 
804 static int
805 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
806 {
807 	struct mmc_data *data;
808 	int reg;
809 
810 	data = cmd->data;
811 	data->xfer_len = 0;
812 
813 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
814 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
815 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
816 
817 	WRITE4(sc, SDMMC_FIFOTH, reg);
818 	wmb();
819 
820 	return (0);
821 }
822 
823 static void
824 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
825 {
826 	struct mmc_data *data;
827 	uint32_t *p, status;
828 
829 	if (cmd == NULL || cmd->data == NULL)
830 		return;
831 
832 	data = cmd->data;
833 	if ((data->flags & MMC_DATA_READ) == 0)
834 		return;
835 
836 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
837 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
838 
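	/*
	 * Drain the data FIFO one 32-bit word at a time until it runs
	 * empty or the whole transfer has been read.
	 */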
839 	while (data->xfer_len < data->len) {
840 		status = READ4(sc, SDMMC_STATUS);
841 		if (status & SDMMC_STATUS_FIFO_EMPTY)
842 			break;
843 		*p++ = READ4(sc, SDMMC_DATA);
844 		data->xfer_len += 4;
845 	}
846 
847 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
848 }
849 
850 static void
851 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
852 {
853 	struct mmc_data *data;
854 	uint32_t *p, status;
855 
856 	if (cmd == NULL || cmd->data == NULL)
857 		return;
858 
859 	data = cmd->data;
860 	if ((data->flags & MMC_DATA_WRITE) == 0)
861 		return;
862 
863 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
864 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
865 
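	/*
	 * Fill the data FIFO one 32-bit word at a time until it is full
	 * or the whole transfer has been written.
	 */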
866 	while (data->xfer_len < data->len) {
867 		status = READ4(sc, SDMMC_STATUS);
868 		if (status & SDMMC_STATUS_FIFO_FULL)
869 			break;
870 		WRITE4(sc, SDMMC_DATA, *p++);
871 		data->xfer_len += 4;
872 	}
873 
874 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
875 }
876 
877 static void
878 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
879 {
880 	struct mmc_data *data;
881 	uint32_t blksz;
882 	uint32_t cmdr;
883 
884 	sc->curcmd = cmd;
885 	data = cmd->data;
886 
887 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
888 		dwmmc_setup_bus(sc, sc->host.ios.clock);
889 
890 	/* XXX Upper layers don't always set this */
891 	cmd->mrq = sc->req;
892 
893 	/* Begin setting up command register. */
894 
895 	cmdr = cmd->opcode;
896 
897 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
898 
899 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
900 	    cmd->opcode == MMC_GO_IDLE_STATE ||
901 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
902 		cmdr |= SDMMC_CMD_STOP_ABORT;
903 	else if (cmd->opcode != MMC_SEND_STATUS && data)
904 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
905 
906 	/* Set up response handling. */
907 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
908 		cmdr |= SDMMC_CMD_RESP_EXP;
909 		if (cmd->flags & MMC_RSP_136)
910 			cmdr |= SDMMC_CMD_RESP_LONG;
911 	}
912 
913 	if (cmd->flags & MMC_RSP_CRC)
914 		cmdr |= SDMMC_CMD_RESP_CRC;
915 
916 	/*
917 	 * XXX: Not all platforms want this.
918 	 */
919 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
920 
921 	if ((sc->flags & CARD_INIT_DONE) == 0) {
922 		sc->flags |= (CARD_INIT_DONE);
923 		cmdr |= SDMMC_CMD_SEND_INIT;
924 	}
925 
926 	if (data) {
927 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
928 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
929 		     sc->use_auto_stop)
930 			cmdr |= SDMMC_CMD_SEND_ASTOP;
931 
932 		cmdr |= SDMMC_CMD_DATA_EXP;
933 		if (data->flags & MMC_DATA_STREAM)
934 			cmdr |= SDMMC_CMD_MODE_STREAM;
935 		if (data->flags & MMC_DATA_WRITE)
936 			cmdr |= SDMMC_CMD_DATA_WRITE;
937 
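		/*
		 * Use the maximum data timeout and program the total byte
		 * count along with the per-block size (capped at one
		 * sector).
		 */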
938 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
939 		WRITE4(sc, SDMMC_BYTCNT, data->len);
940 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
941 			 data->len : MMC_SECTOR_SIZE;
942 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
943 
944 		if (sc->use_pio) {
945 			pio_prepare(sc, cmd);
946 		} else {
947 			dma_prepare(sc, cmd);
948 		}
949 		wmb();
950 	}
951 
952 	dprintf("cmdr 0x%08x\n", cmdr);
953 
954 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
955 	wmb();
956 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
957 }
958 
959 static void
960 dwmmc_next_operation(struct dwmmc_softc *sc)
961 {
962 	struct mmc_request *req;
963 
964 	req = sc->req;
965 	if (req == NULL)
966 		return;
967 
968 	sc->acd_rcvd = 0;
969 	sc->dto_rcvd = 0;
970 	sc->cmd_done = 0;
971 
972 	/*
973 	 * XXX: Wait while the card is still busy.
974 	 * We need this to prevent data timeouts,
975 	 * mostly caused by a multi-block write command
976 	 * followed by a single-block read.
977 	 */
978 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
979 		continue;
980 
981 	if (sc->flags & PENDING_CMD) {
982 		sc->flags &= ~PENDING_CMD;
983 		dwmmc_start_cmd(sc, req->cmd);
984 		return;
985 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
986 		sc->flags &= ~PENDING_STOP;
987 		dwmmc_start_cmd(sc, req->stop);
988 		return;
989 	}
990 
991 	sc->req = NULL;
992 	sc->curcmd = NULL;
993 	req->done(req);
994 }
995 
996 static int
997 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
998 {
999 	struct dwmmc_softc *sc;
1000 
1001 	sc = device_get_softc(brdev);
1002 
1003 	dprintf("%s\n", __func__);
1004 
1005 	DWMMC_LOCK(sc);
1006 
1007 	if (sc->req != NULL) {
1008 		DWMMC_UNLOCK(sc);
1009 		return (EBUSY);
1010 	}
1011 
1012 	sc->req = req;
1013 	sc->flags |= PENDING_CMD;
1014 	if (sc->req->stop)
1015 		sc->flags |= PENDING_STOP;
1016 	dwmmc_next_operation(sc);
1017 
1018 	DWMMC_UNLOCK(sc);
1019 	return (0);
1020 }
1021 
1022 static int
1023 dwmmc_get_ro(device_t brdev, device_t reqdev)
1024 {
1025 
1026 	dprintf("%s\n", __func__);
1027 
1028 	return (0);
1029 }
1030 
1031 static int
1032 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1033 {
1034 	struct dwmmc_softc *sc;
1035 
1036 	sc = device_get_softc(brdev);
1037 
1038 	DWMMC_LOCK(sc);
1039 	while (sc->bus_busy)
1040 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1041 	sc->bus_busy++;
1042 	DWMMC_UNLOCK(sc);
1043 	return (0);
1044 }
1045 
1046 static int
1047 dwmmc_release_host(device_t brdev, device_t reqdev)
1048 {
1049 	struct dwmmc_softc *sc;
1050 
1051 	sc = device_get_softc(brdev);
1052 
1053 	DWMMC_LOCK(sc);
1054 	sc->bus_busy--;
1055 	wakeup(sc);
1056 	DWMMC_UNLOCK(sc);
1057 	return (0);
1058 }
1059 
1060 static int
1061 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1062 {
1063 	struct dwmmc_softc *sc;
1064 
1065 	sc = device_get_softc(bus);
1066 
1067 	switch (which) {
1068 	default:
1069 		return (EINVAL);
1070 	case MMCBR_IVAR_BUS_MODE:
1071 		*(int *)result = sc->host.ios.bus_mode;
1072 		break;
1073 	case MMCBR_IVAR_BUS_WIDTH:
1074 		*(int *)result = sc->host.ios.bus_width;
1075 		break;
1076 	case MMCBR_IVAR_CHIP_SELECT:
1077 		*(int *)result = sc->host.ios.chip_select;
1078 		break;
1079 	case MMCBR_IVAR_CLOCK:
1080 		*(int *)result = sc->host.ios.clock;
1081 		break;
1082 	case MMCBR_IVAR_F_MIN:
1083 		*(int *)result = sc->host.f_min;
1084 		break;
1085 	case MMCBR_IVAR_F_MAX:
1086 		*(int *)result = sc->host.f_max;
1087 		break;
1088 	case MMCBR_IVAR_HOST_OCR:
1089 		*(int *)result = sc->host.host_ocr;
1090 		break;
1091 	case MMCBR_IVAR_MODE:
1092 		*(int *)result = sc->host.mode;
1093 		break;
1094 	case MMCBR_IVAR_OCR:
1095 		*(int *)result = sc->host.ocr;
1096 		break;
1097 	case MMCBR_IVAR_POWER_MODE:
1098 		*(int *)result = sc->host.ios.power_mode;
1099 		break;
1100 	case MMCBR_IVAR_VDD:
1101 		*(int *)result = sc->host.ios.vdd;
1102 		break;
1103 	case MMCBR_IVAR_CAPS:
1104 		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
1105 		*(int *)result = sc->host.caps;
1106 		break;
1107 	case MMCBR_IVAR_MAX_DATA:
1108 		*(int *)result = sc->desc_count;
1109 	}
1110 	return (0);
1111 }
1112 
1113 static int
1114 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1115 {
1116 	struct dwmmc_softc *sc;
1117 
1118 	sc = device_get_softc(bus);
1119 
1120 	switch (which) {
1121 	default:
1122 		return (EINVAL);
1123 	case MMCBR_IVAR_BUS_MODE:
1124 		sc->host.ios.bus_mode = value;
1125 		break;
1126 	case MMCBR_IVAR_BUS_WIDTH:
1127 		sc->host.ios.bus_width = value;
1128 		break;
1129 	case MMCBR_IVAR_CHIP_SELECT:
1130 		sc->host.ios.chip_select = value;
1131 		break;
1132 	case MMCBR_IVAR_CLOCK:
1133 		sc->host.ios.clock = value;
1134 		break;
1135 	case MMCBR_IVAR_MODE:
1136 		sc->host.mode = value;
1137 		break;
1138 	case MMCBR_IVAR_OCR:
1139 		sc->host.ocr = value;
1140 		break;
1141 	case MMCBR_IVAR_POWER_MODE:
1142 		sc->host.ios.power_mode = value;
1143 		break;
1144 	case MMCBR_IVAR_VDD:
1145 		sc->host.ios.vdd = value;
1146 		break;
1147 	/* These are read-only */
1148 	case MMCBR_IVAR_CAPS:
1149 	case MMCBR_IVAR_HOST_OCR:
1150 	case MMCBR_IVAR_F_MIN:
1151 	case MMCBR_IVAR_F_MAX:
1152 	case MMCBR_IVAR_MAX_DATA:
1153 		return (EINVAL);
1154 	}
1155 	return (0);
1156 }
1157 
1158 static device_method_t dwmmc_methods[] = {
1159 	DEVMETHOD(device_probe,		dwmmc_probe),
1160 	DEVMETHOD(device_attach,	dwmmc_attach),
1161 
1162 	/* Bus interface */
1163 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1164 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1165 
1166 	/* mmcbr_if */
1167 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1168 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1169 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1170 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1171 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1172 
1173 	DEVMETHOD_END
1174 };
1175 
1176 driver_t dwmmc_driver = {
1177 	"dwmmc",
1178 	dwmmc_methods,
1179 	sizeof(struct dwmmc_softc),
1180 };
1181 
1182 static devclass_t dwmmc_devclass;
1183 
1184 DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, NULL, NULL);
1185 DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, NULL, NULL);
1186 #ifndef MMCCAM
1187 MMC_DECLARE_BRIDGE(dwmmc);
1188 #endif
1189