xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 05890ca018513401980415f735eca8c94aaa644a)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/malloc.h>
45 #include <sys/rman.h>
46 
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
49 
50 #include <dev/fdt/fdt_common.h>
51 #include <dev/ofw/openfirm.h>
52 #include <dev/ofw/ofw_bus.h>
53 #include <dev/ofw/ofw_bus_subr.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 
59 #include <dev/mmc/host/dwmmc_reg.h>
60 #include <dev/mmc/host/dwmmc_var.h>
61 
62 #include "opt_mmccam.h"
63 
64 #include "mmcbr_if.h"
65 
66 #define dprintf(x, arg...)
67 
68 #define	READ4(_sc, _reg) \
69 	bus_read_4((_sc)->res[0], _reg)
70 #define	WRITE4(_sc, _reg, _val) \
71 	bus_write_4((_sc)->res[0], _reg, _val)
72 
73 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
74 
75 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
76 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
77 #define	DWMMC_LOCK_INIT(_sc) \
78 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
79 	    "dwmmc", MTX_DEF)
80 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
81 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
82 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
83 
84 #define	PENDING_CMD	0x01
85 #define	PENDING_STOP	0x02
86 #define	CARD_INIT_DONE	0x04
87 
88 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
89 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
90 				|SDMMC_INTMASK_EBE)
91 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
92 				|SDMMC_INTMASK_RE)
93 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
94 				|SDMMC_INTMASK_HLE)
95 
96 #define	DES0_DIC	(1 << 1)
97 #define	DES0_LD		(1 << 2)
98 #define	DES0_FS		(1 << 3)
99 #define	DES0_CH		(1 << 4)
100 #define	DES0_ER		(1 << 5)
101 #define	DES0_CES	(1 << 30)
102 #define	DES0_OWN	(1 << 31)
103 
104 #define	DES1_BS1_MASK	0xfff
105 #define	DES1_BS1_SHIFT	0
106 
/*
 * Internal DMA controller (IDMAC) descriptor, chained (des3 = next) mode.
 * Layout is fixed by the hardware; see the DES0_xxx / DES1_xxx bits above.
 */
struct idmac_desc {
	uint32_t	des0;	/* control/status (OWN, FS, LD, CH, ...) */
	uint32_t	des1;	/* bufsize (buffer 1 byte count) */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};
113 
114 #define	DESC_MAX	256
115 #define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
116 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
117 
118 static void dwmmc_next_operation(struct dwmmc_softc *);
119 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
120 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
121 static int dma_stop(struct dwmmc_softc *);
122 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
123 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
124 
/* Resources: sc->res[0] is the register window, sc->res[1] the IRQ. */
static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};
130 
131 #define	HWTYPE_MASK		(0x0000ffff)
132 #define	HWFLAG_MASK		(0xffff << 16)
133 
134 static void
135 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
136 {
137 
138 	if (error != 0)
139 		return;
140 	*(bus_addr_t *)arg = segs[0].ds_addr;
141 }
142 
143 static void
144 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
145 {
146 	struct dwmmc_softc *sc;
147 	int idx;
148 
149 	if (error != 0)
150 		return;
151 
152 	sc = arg;
153 
154 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
155 
156 	for (idx = 0; idx < nsegs; idx++) {
157 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
158 		sc->desc_ring[idx].des1 = segs[idx].ds_len;
159 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
160 
161 		if (idx == 0)
162 			sc->desc_ring[idx].des0 |= DES0_FS;
163 
164 		if (idx == (nsegs - 1)) {
165 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
166 			sc->desc_ring[idx].des0 |= DES0_LD;
167 		}
168 	}
169 }
170 
171 static int
172 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
173 {
174 	int reg;
175 	int i;
176 
177 	reg = READ4(sc, SDMMC_CTRL);
178 	reg |= (reset_bits);
179 	WRITE4(sc, SDMMC_CTRL, reg);
180 
181 	/* Wait reset done */
182 	for (i = 0; i < 100; i++) {
183 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
184 			return (0);
185 		DELAY(10);
186 	}
187 
188 	device_printf(sc->dev, "Reset failed\n");
189 
190 	return (1);
191 }
192 
193 static int
194 dma_setup(struct dwmmc_softc *sc)
195 {
196 	int error;
197 	int nidx;
198 	int idx;
199 
200 	/*
201 	 * Set up TX descriptor ring, descriptors, and dma maps.
202 	 */
203 	error = bus_dma_tag_create(
204 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
205 	    4096, 0,			/* alignment, boundary */
206 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
207 	    BUS_SPACE_MAXADDR,		/* highaddr */
208 	    NULL, NULL,			/* filter, filterarg */
209 	    DESC_SIZE, 1, 		/* maxsize, nsegments */
210 	    DESC_SIZE,			/* maxsegsize */
211 	    0,				/* flags */
212 	    NULL, NULL,			/* lockfunc, lockarg */
213 	    &sc->desc_tag);
214 	if (error != 0) {
215 		device_printf(sc->dev,
216 		    "could not create ring DMA tag.\n");
217 		return (1);
218 	}
219 
220 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
221 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
222 	    &sc->desc_map);
223 	if (error != 0) {
224 		device_printf(sc->dev,
225 		    "could not allocate descriptor ring.\n");
226 		return (1);
227 	}
228 
229 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
230 	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
231 	    &sc->desc_ring_paddr, 0);
232 	if (error != 0) {
233 		device_printf(sc->dev,
234 		    "could not load descriptor ring map.\n");
235 		return (1);
236 	}
237 
238 	for (idx = 0; idx < sc->desc_count; idx++) {
239 		sc->desc_ring[idx].des0 = DES0_CH;
240 		sc->desc_ring[idx].des1 = 0;
241 		nidx = (idx + 1) % sc->desc_count;
242 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
243 		    (nidx * sizeof(struct idmac_desc));
244 	}
245 
246 	error = bus_dma_tag_create(
247 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
248 	    4096, 0,			/* alignment, boundary */
249 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
250 	    BUS_SPACE_MAXADDR,		/* highaddr */
251 	    NULL, NULL,			/* filter, filterarg */
252 	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
253 	    sc->desc_count,		/* nsegments */
254 	    MMC_SECTOR_SIZE,		/* maxsegsize */
255 	    0,				/* flags */
256 	    NULL, NULL,			/* lockfunc, lockarg */
257 	    &sc->buf_tag);
258 	if (error != 0) {
259 		device_printf(sc->dev,
260 		    "could not create ring DMA tag.\n");
261 		return (1);
262 	}
263 
264 	error = bus_dmamap_create(sc->buf_tag, 0,
265 	    &sc->buf_map);
266 	if (error != 0) {
267 		device_printf(sc->dev,
268 		    "could not create TX buffer DMA map.\n");
269 		return (1);
270 	}
271 
272 	return (0);
273 }
274 
275 static void
276 dwmmc_cmd_done(struct dwmmc_softc *sc)
277 {
278 	struct mmc_command *cmd;
279 
280 	cmd = sc->curcmd;
281 	if (cmd == NULL)
282 		return;
283 
284 	if (cmd->flags & MMC_RSP_PRESENT) {
285 		if (cmd->flags & MMC_RSP_136) {
286 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
287 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
288 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
289 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
290 		} else {
291 			cmd->resp[3] = 0;
292 			cmd->resp[2] = 0;
293 			cmd->resp[1] = 0;
294 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
295 		}
296 	}
297 }
298 
299 static void
300 dwmmc_tasklet(struct dwmmc_softc *sc)
301 {
302 	struct mmc_command *cmd;
303 
304 	cmd = sc->curcmd;
305 	if (cmd == NULL)
306 		return;
307 
308 	if (!sc->cmd_done)
309 		return;
310 
311 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
312 		dwmmc_next_operation(sc);
313 	} else if (cmd->data && sc->dto_rcvd) {
314 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
315 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
316 		     sc->use_auto_stop) {
317 			if (sc->acd_rcvd)
318 				dwmmc_next_operation(sc);
319 		} else {
320 			dwmmc_next_operation(sc);
321 		}
322 	}
323 }
324 
325 static void
326 dwmmc_intr(void *arg)
327 {
328 	struct mmc_command *cmd;
329 	struct dwmmc_softc *sc;
330 	uint32_t reg;
331 
332 	sc = arg;
333 
334 	DWMMC_LOCK(sc);
335 
336 	cmd = sc->curcmd;
337 
338 	/* First handle SDMMC controller interrupts */
339 	reg = READ4(sc, SDMMC_MINTSTS);
340 	if (reg) {
341 		dprintf("%s 0x%08x\n", __func__, reg);
342 
343 		if (reg & DWMMC_CMD_ERR_FLAGS) {
344 			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
345 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
346 				reg, cmd->opcode);
347 			cmd->error = MMC_ERR_TIMEOUT;
348 		}
349 
350 		if (reg & DWMMC_DATA_ERR_FLAGS) {
351 			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
352 			dprintf("data err 0x%08x cmd 0x%08x\n",
353 				reg, cmd->opcode);
354 			cmd->error = MMC_ERR_FAILED;
355 			if (!sc->use_pio) {
356 				dma_done(sc, cmd);
357 				dma_stop(sc);
358 			}
359 		}
360 
361 		if (reg & SDMMC_INTMASK_CMD_DONE) {
362 			dwmmc_cmd_done(sc);
363 			sc->cmd_done = 1;
364 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
365 		}
366 
367 		if (reg & SDMMC_INTMASK_ACD) {
368 			sc->acd_rcvd = 1;
369 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
370 		}
371 
372 		if (reg & SDMMC_INTMASK_DTO) {
373 			sc->dto_rcvd = 1;
374 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
375 		}
376 
377 		if (reg & SDMMC_INTMASK_CD) {
378 			/* XXX: Handle card detect */
379 			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
380 		}
381 	}
382 
383 	if (sc->use_pio) {
384 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
385 			pio_read(sc, cmd);
386 		}
387 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
388 			pio_write(sc, cmd);
389 		}
390 	} else {
391 		/* Now handle DMA interrupts */
392 		reg = READ4(sc, SDMMC_IDSTS);
393 		if (reg) {
394 			dprintf("dma intr 0x%08x\n", reg);
395 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
396 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
397 							 SDMMC_IDINTEN_RI));
398 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
399 				dma_done(sc, cmd);
400 			}
401 		}
402 	}
403 
404 	dwmmc_tasklet(sc);
405 
406 	DWMMC_UNLOCK(sc);
407 }
408 
409 static int
410 parse_fdt(struct dwmmc_softc *sc)
411 {
412 	pcell_t dts_value[3];
413 	phandle_t node;
414 	int len;
415 
416 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
417 		return (ENXIO);
418 
419 	/* fifo-depth */
420 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
421 		OF_getencprop(node, "fifo-depth", dts_value, len);
422 		sc->fifo_depth = dts_value[0];
423 	}
424 
425 	/* num-slots */
426 	sc->num_slots = 1;
427 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
428 		OF_getencprop(node, "num-slots", dts_value, len);
429 		sc->num_slots = dts_value[0];
430 	}
431 
432 	/*
433 	 * We need some platform-specific code to know
434 	 * what the clock is supplied for our device.
435 	 * For now rely on the value specified in FDT.
436 	 */
437 	if (sc->bus_hz == 0) {
438 		if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
439 			return (ENXIO);
440 		OF_getencprop(node, "bus-frequency", dts_value, len);
441 		sc->bus_hz = dts_value[0];
442 	}
443 
444 	return (0);
445 }
446 
/*
 * Shared attach routine for dwmmc front ends: parse FDT properties,
 * allocate register/IRQ resources, reset the controller, set up the
 * clock and (unless PIO) the IDMAC, then enable interrupts and attach
 * the mmc child bus.  Returns 0 on success or ENXIO.
 */
int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;

	/* Use Auto Stop: it saves about a hundred interrupts per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	/* res[0]: register window, res[1]: IRQ (see dwmmc_spec). */
	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/*
	 * Setup interrupt handler.
	 * NOTE(review): INTR_TYPE_NET is unusual for a storage controller
	 * (INTR_TYPE_BIO would be conventional) — confirm intent.
	 */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
		READ4(sc, SDMMC_VERID) & 0xffff);

	if (sc->desc_count == 0)
		sc->desc_count = DESC_MAX;

	/* XXX: we support operation for slot index 0 only */
	slot = 0;
	/* Some SoCs invert the PWREN polarity (sc->pwren_inverted). */
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
				  SDMMC_CTRL_FIFO_RESET |
				  SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	/* No FDT fifo-depth: derive it from the reset-time RX watermark. */
	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
					   SDMMC_IDINTEN_RI |
					   SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
				   SDMMC_INTMASK_DTO |
				   SDMMC_INTMASK_ACD |
				   SDMMC_INTMASK_TXDR |
				   SDMMC_INTMASK_RXDR |
				   DWMMC_ERR_FLAGS |
				   SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	/* Advertise host capabilities to the mmc layer. */
	sc->host.f_min = 400000;
	sc->host.f_max = min(200000000, sc->bus_hz);
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_4_BIT_DATA;

	device_add_child(dev, "mmc", -1);
	return (bus_generic_attach(dev));
}
551 
552 static int
553 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
554 {
555 	int tout;
556 	int div;
557 
558 	if (freq == 0) {
559 		WRITE4(sc, SDMMC_CLKENA, 0);
560 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
561 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
562 
563 		tout = 1000;
564 		do {
565 			if (tout-- < 0) {
566 				device_printf(sc->dev, "Failed update clk\n");
567 				return (1);
568 			}
569 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
570 
571 		return (0);
572 	}
573 
574 	WRITE4(sc, SDMMC_CLKENA, 0);
575 	WRITE4(sc, SDMMC_CLKSRC, 0);
576 
577 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
578 
579 	WRITE4(sc, SDMMC_CLKDIV, div);
580 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
581 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
582 
583 	tout = 1000;
584 	do {
585 		if (tout-- < 0) {
586 			device_printf(sc->dev, "Failed to update clk");
587 			return (1);
588 		}
589 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
590 
591 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
592 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
593 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
594 
595 	tout = 1000;
596 	do {
597 		if (tout-- < 0) {
598 			device_printf(sc->dev, "Failed to enable clk\n");
599 			return (1);
600 		}
601 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
602 
603 	return (0);
604 }
605 
606 static int
607 dwmmc_update_ios(device_t brdev, device_t reqdev)
608 {
609 	struct dwmmc_softc *sc;
610 	struct mmc_ios *ios;
611 
612 	sc = device_get_softc(brdev);
613 	ios = &sc->host.ios;
614 
615 	dprintf("Setting up clk %u bus_width %d\n",
616 		ios->clock, ios->bus_width);
617 
618 	dwmmc_setup_bus(sc, ios->clock);
619 
620 	if (ios->bus_width == bus_width_8)
621 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
622 	else if (ios->bus_width == bus_width_4)
623 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
624 	else
625 		WRITE4(sc, SDMMC_CTYPE, 0);
626 
627 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
628 		/* XXX: take care about DDR or SDR use here */
629 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
630 	}
631 
632 	/*
633 	 * XXX: take care about DDR bit
634 	 *
635 	 * reg = READ4(sc, SDMMC_UHS_REG);
636 	 * reg |= (SDMMC_UHS_REG_DDR);
637 	 * WRITE4(sc, SDMMC_UHS_REG, reg);
638 	 */
639 
640 	return (0);
641 }
642 
643 static int
644 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
645 {
646 	struct mmc_data *data;
647 
648 	data = cmd->data;
649 
650 	if (data->flags & MMC_DATA_WRITE)
651 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
652 			BUS_DMASYNC_POSTWRITE);
653 	else
654 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
655 			BUS_DMASYNC_POSTREAD);
656 
657 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
658 	    BUS_DMASYNC_POSTWRITE);
659 
660 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
661 
662 	return (0);
663 }
664 
665 static int
666 dma_stop(struct dwmmc_softc *sc)
667 {
668 	int reg;
669 
670 	reg = READ4(sc, SDMMC_CTRL);
671 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
672 	reg |= (SDMMC_CTRL_DMA_RESET);
673 	WRITE4(sc, SDMMC_CTRL, reg);
674 
675 	reg = READ4(sc, SDMMC_BMOD);
676 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
677 	reg |= (SDMMC_BMOD_SWR);
678 	WRITE4(sc, SDMMC_BMOD, reg);
679 
680 	return (0);
681 }
682 
/*
 * Map the command's data buffer into the IDMAC descriptor ring and start
 * the internal DMA engine.  Always returns 0; a map-load failure panics.
 * NOTE(review): the panic on bus_dmamap_load() failure should arguably
 * be an error return, but the caller currently ignores the return value.
 */
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	/* DMA owns the FIFO: mask the PIO drain/fill interrupts. */
	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	/* dwmmc_ring_setup() fills the descriptors from the segment list. */
	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	/* Program burst size (MSIZE) and FIFO watermarks. */
	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	/* Hand the FIFO to the internal DMA controller. */
	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	/* Enable the IDMAC (DE) with fixed burst (FB). */
	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}
734 
735 static int
736 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
737 {
738 	struct mmc_data *data;
739 	int reg;
740 
741 	data = cmd->data;
742 	data->xfer_len = 0;
743 
744 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
745 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
746 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
747 
748 	WRITE4(sc, SDMMC_FIFOTH, reg);
749 	wmb();
750 
751 	return (0);
752 }
753 
754 static void
755 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
756 {
757 	struct mmc_data *data;
758 	uint32_t *p, status;
759 
760 	if (cmd == NULL || cmd->data == NULL)
761 		return;
762 
763 	data = cmd->data;
764 	if ((data->flags & MMC_DATA_READ) == 0)
765 		return;
766 
767 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
768 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
769 
770 	while (data->xfer_len < data->len) {
771 		status = READ4(sc, SDMMC_STATUS);
772 		if (status & SDMMC_STATUS_FIFO_EMPTY)
773 			break;
774 		*p++ = READ4(sc, SDMMC_DATA);
775 		data->xfer_len += 4;
776 	}
777 
778 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
779 }
780 
781 static void
782 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
783 {
784 	struct mmc_data *data;
785 	uint32_t *p, status;
786 
787 	if (cmd == NULL || cmd->data == NULL)
788 		return;
789 
790 	data = cmd->data;
791 	if ((data->flags & MMC_DATA_WRITE) == 0)
792 		return;
793 
794 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
795 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
796 
797 	while (data->xfer_len < data->len) {
798 		status = READ4(sc, SDMMC_STATUS);
799 		if (status & SDMMC_STATUS_FIFO_FULL)
800 			break;
801 		WRITE4(sc, SDMMC_DATA, *p++);
802 		data->xfer_len += 4;
803 	}
804 
805 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
806 }
807 
808 static void
809 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
810 {
811 	struct mmc_data *data;
812 	uint32_t blksz;
813 	uint32_t cmdr;
814 
815 	sc->curcmd = cmd;
816 	data = cmd->data;
817 
818 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
819 		dwmmc_setup_bus(sc, sc->host.ios.clock);
820 
821 	/* XXX Upper layers don't always set this */
822 	cmd->mrq = sc->req;
823 
824 	/* Begin setting up command register. */
825 
826 	cmdr = cmd->opcode;
827 
828 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
829 
830 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
831 	    cmd->opcode == MMC_GO_IDLE_STATE ||
832 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
833 		cmdr |= SDMMC_CMD_STOP_ABORT;
834 	else if (cmd->opcode != MMC_SEND_STATUS && data)
835 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
836 
837 	/* Set up response handling. */
838 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
839 		cmdr |= SDMMC_CMD_RESP_EXP;
840 		if (cmd->flags & MMC_RSP_136)
841 			cmdr |= SDMMC_CMD_RESP_LONG;
842 	}
843 
844 	if (cmd->flags & MMC_RSP_CRC)
845 		cmdr |= SDMMC_CMD_RESP_CRC;
846 
847 	/*
848 	 * XXX: Not all platforms want this.
849 	 */
850 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
851 
852 	if ((sc->flags & CARD_INIT_DONE) == 0) {
853 		sc->flags |= (CARD_INIT_DONE);
854 		cmdr |= SDMMC_CMD_SEND_INIT;
855 	}
856 
857 	if (data) {
858 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
859 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
860 		     sc->use_auto_stop)
861 			cmdr |= SDMMC_CMD_SEND_ASTOP;
862 
863 		cmdr |= SDMMC_CMD_DATA_EXP;
864 		if (data->flags & MMC_DATA_STREAM)
865 			cmdr |= SDMMC_CMD_MODE_STREAM;
866 		if (data->flags & MMC_DATA_WRITE)
867 			cmdr |= SDMMC_CMD_DATA_WRITE;
868 
869 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
870 		WRITE4(sc, SDMMC_BYTCNT, data->len);
871 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
872 			 data->len : MMC_SECTOR_SIZE;
873 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
874 
875 		if (sc->use_pio) {
876 			pio_prepare(sc, cmd);
877 		} else {
878 			dma_prepare(sc, cmd);
879 		}
880 		wmb();
881 	}
882 
883 	dprintf("cmdr 0x%08x\n", cmdr);
884 
885 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
886 	wmb();
887 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
888 };
889 
/*
 * Advance the current request's state machine: send the pending command,
 * then the pending stop (unless the controller auto-stops), and finally
 * hand the completed request back to the mmc layer.
 */
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	/* Reset per-command interrupt bookkeeping. */
	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait until card is still busy.
	 * We do need this to prevent data timeouts,
	 * mostly caused by multi-block write command
	 * followed by single-read.
	 * NOTE(review): this spin has no upper bound; a stuck card would
	 * hang here with the lock held — consider adding a timeout.
	 */
	while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	/* Nothing left to send: complete the request. */
	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}
926 
927 static int
928 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
929 {
930 	struct dwmmc_softc *sc;
931 
932 	sc = device_get_softc(brdev);
933 
934 	dprintf("%s\n", __func__);
935 
936 	DWMMC_LOCK(sc);
937 
938 	if (sc->req != NULL) {
939 		DWMMC_UNLOCK(sc);
940 		return (EBUSY);
941 	}
942 
943 	sc->req = req;
944 	sc->flags |= PENDING_CMD;
945 	if (sc->req->stop)
946 		sc->flags |= PENDING_STOP;
947 	dwmmc_next_operation(sc);
948 
949 	DWMMC_UNLOCK(sc);
950 	return (0);
951 }
952 
953 static int
954 dwmmc_get_ro(device_t brdev, device_t reqdev)
955 {
956 
957 	dprintf("%s\n", __func__);
958 
959 	return (0);
960 }
961 
962 static int
963 dwmmc_acquire_host(device_t brdev, device_t reqdev)
964 {
965 	struct dwmmc_softc *sc;
966 
967 	sc = device_get_softc(brdev);
968 
969 	DWMMC_LOCK(sc);
970 	while (sc->bus_busy)
971 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
972 	sc->bus_busy++;
973 	DWMMC_UNLOCK(sc);
974 	return (0);
975 }
976 
977 static int
978 dwmmc_release_host(device_t brdev, device_t reqdev)
979 {
980 	struct dwmmc_softc *sc;
981 
982 	sc = device_get_softc(brdev);
983 
984 	DWMMC_LOCK(sc);
985 	sc->bus_busy--;
986 	wakeup(sc);
987 	DWMMC_UNLOCK(sc);
988 	return (0);
989 }
990 
991 static int
992 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
993 {
994 	struct dwmmc_softc *sc;
995 
996 	sc = device_get_softc(bus);
997 
998 	switch (which) {
999 	default:
1000 		return (EINVAL);
1001 	case MMCBR_IVAR_BUS_MODE:
1002 		*(int *)result = sc->host.ios.bus_mode;
1003 		break;
1004 	case MMCBR_IVAR_BUS_WIDTH:
1005 		*(int *)result = sc->host.ios.bus_width;
1006 		break;
1007 	case MMCBR_IVAR_CHIP_SELECT:
1008 		*(int *)result = sc->host.ios.chip_select;
1009 		break;
1010 	case MMCBR_IVAR_CLOCK:
1011 		*(int *)result = sc->host.ios.clock;
1012 		break;
1013 	case MMCBR_IVAR_F_MIN:
1014 		*(int *)result = sc->host.f_min;
1015 		break;
1016 	case MMCBR_IVAR_F_MAX:
1017 		*(int *)result = sc->host.f_max;
1018 		break;
1019 	case MMCBR_IVAR_HOST_OCR:
1020 		*(int *)result = sc->host.host_ocr;
1021 		break;
1022 	case MMCBR_IVAR_MODE:
1023 		*(int *)result = sc->host.mode;
1024 		break;
1025 	case MMCBR_IVAR_OCR:
1026 		*(int *)result = sc->host.ocr;
1027 		break;
1028 	case MMCBR_IVAR_POWER_MODE:
1029 		*(int *)result = sc->host.ios.power_mode;
1030 		break;
1031 	case MMCBR_IVAR_VDD:
1032 		*(int *)result = sc->host.ios.vdd;
1033 		break;
1034 	case MMCBR_IVAR_CAPS:
1035 		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
1036 		*(int *)result = sc->host.caps;
1037 		break;
1038 	case MMCBR_IVAR_MAX_DATA:
1039 		*(int *)result = sc->desc_count;
1040 	}
1041 	return (0);
1042 }
1043 
1044 static int
1045 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1046 {
1047 	struct dwmmc_softc *sc;
1048 
1049 	sc = device_get_softc(bus);
1050 
1051 	switch (which) {
1052 	default:
1053 		return (EINVAL);
1054 	case MMCBR_IVAR_BUS_MODE:
1055 		sc->host.ios.bus_mode = value;
1056 		break;
1057 	case MMCBR_IVAR_BUS_WIDTH:
1058 		sc->host.ios.bus_width = value;
1059 		break;
1060 	case MMCBR_IVAR_CHIP_SELECT:
1061 		sc->host.ios.chip_select = value;
1062 		break;
1063 	case MMCBR_IVAR_CLOCK:
1064 		sc->host.ios.clock = value;
1065 		break;
1066 	case MMCBR_IVAR_MODE:
1067 		sc->host.mode = value;
1068 		break;
1069 	case MMCBR_IVAR_OCR:
1070 		sc->host.ocr = value;
1071 		break;
1072 	case MMCBR_IVAR_POWER_MODE:
1073 		sc->host.ios.power_mode = value;
1074 		break;
1075 	case MMCBR_IVAR_VDD:
1076 		sc->host.ios.vdd = value;
1077 		break;
1078 	/* These are read-only */
1079 	case MMCBR_IVAR_CAPS:
1080 	case MMCBR_IVAR_HOST_OCR:
1081 	case MMCBR_IVAR_F_MIN:
1082 	case MMCBR_IVAR_F_MAX:
1083 	case MMCBR_IVAR_MAX_DATA:
1084 		return (EINVAL);
1085 	}
1086 	return (0);
1087 }
1088 
/* Method table: newbus ivar accessors plus the mmcbr bridge interface. */
static device_method_t dwmmc_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

/*
 * Base driver class; presumably SoC-specific front ends (e.g. the
 * HWTYPE_EXYNOS/ROCKCHIP variants referenced above) build on it.
 */
DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
    sizeof(struct dwmmc_softc));
1106