xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 5bf5ca772c6de2d53344a78cf461447cc322ccea)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/malloc.h>
45 #include <sys/rman.h>
46 
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
49 
50 #include <dev/fdt/fdt_common.h>
51 #include <dev/ofw/openfirm.h>
52 #include <dev/ofw/ofw_bus.h>
53 #include <dev/ofw/ofw_bus_subr.h>
54 
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/intr.h>
58 
59 #ifdef EXT_RESOURCES
60 #include <dev/extres/clk/clk.h>
61 #endif
62 
63 #include <dev/mmc/host/dwmmc_reg.h>
64 #include <dev/mmc/host/dwmmc_var.h>
65 
66 #include "opt_mmccam.h"
67 
68 #include "mmcbr_if.h"
69 
70 #define dprintf(x, arg...)
71 
72 #define	READ4(_sc, _reg) \
73 	bus_read_4((_sc)->res[0], _reg)
74 #define	WRITE4(_sc, _reg, _val) \
75 	bus_write_4((_sc)->res[0], _reg, _val)
76 
77 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
78 
79 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
80 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
81 #define	DWMMC_LOCK_INIT(_sc) \
82 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
83 	    "dwmmc", MTX_DEF)
84 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
85 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
86 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
87 
88 #define	PENDING_CMD	0x01
89 #define	PENDING_STOP	0x02
90 #define	CARD_INIT_DONE	0x04
91 
92 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
93 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
94 				|SDMMC_INTMASK_EBE)
95 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
96 				|SDMMC_INTMASK_RE)
97 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
98 				|SDMMC_INTMASK_HLE)
99 
100 #define	DES0_DIC	(1 << 1)
101 #define	DES0_LD		(1 << 2)
102 #define	DES0_FS		(1 << 3)
103 #define	DES0_CH		(1 << 4)
104 #define	DES0_ER		(1 << 5)
105 #define	DES0_CES	(1 << 30)
106 #define	DES0_OWN	(1 << 31)
107 
108 #define	DES1_BS1_MASK	0xfff
109 #define	DES1_BS1_SHIFT	0
110 
111 struct idmac_desc {
112 	uint32_t	des0;	/* control */
113 	uint32_t	des1;	/* bufsize */
114 	uint32_t	des2;	/* buf1 phys addr */
115 	uint32_t	des3;	/* buf2 phys addr or next descr */
116 };
117 
118 #define	DESC_MAX	256
119 #define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
120 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
121 
122 static void dwmmc_next_operation(struct dwmmc_softc *);
123 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
124 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
125 static int dma_stop(struct dwmmc_softc *);
126 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
127 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
128 
/* Resources used by every dwmmc instance: one memory window, one IRQ. */
static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};
134 
135 #define	HWTYPE_MASK		(0x0000ffff)
136 #define	HWFLAG_MASK		(0xffff << 16)
137 
138 static void
139 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
140 {
141 
142 	if (error != 0)
143 		return;
144 	*(bus_addr_t *)arg = segs[0].ds_addr;
145 }
146 
147 static void
148 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
149 {
150 	struct dwmmc_softc *sc;
151 	int idx;
152 
153 	if (error != 0)
154 		return;
155 
156 	sc = arg;
157 
158 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
159 
160 	for (idx = 0; idx < nsegs; idx++) {
161 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
162 		sc->desc_ring[idx].des1 = segs[idx].ds_len;
163 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
164 
165 		if (idx == 0)
166 			sc->desc_ring[idx].des0 |= DES0_FS;
167 
168 		if (idx == (nsegs - 1)) {
169 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
170 			sc->desc_ring[idx].des0 |= DES0_LD;
171 		}
172 	}
173 }
174 
175 static int
176 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
177 {
178 	int reg;
179 	int i;
180 
181 	reg = READ4(sc, SDMMC_CTRL);
182 	reg |= (reset_bits);
183 	WRITE4(sc, SDMMC_CTRL, reg);
184 
185 	/* Wait reset done */
186 	for (i = 0; i < 100; i++) {
187 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
188 			return (0);
189 		DELAY(10);
190 	}
191 
192 	device_printf(sc->dev, "Reset failed\n");
193 
194 	return (1);
195 }
196 
197 static int
198 dma_setup(struct dwmmc_softc *sc)
199 {
200 	int error;
201 	int nidx;
202 	int idx;
203 
204 	/*
205 	 * Set up TX descriptor ring, descriptors, and dma maps.
206 	 */
207 	error = bus_dma_tag_create(
208 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
209 	    4096, 0,			/* alignment, boundary */
210 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
211 	    BUS_SPACE_MAXADDR,		/* highaddr */
212 	    NULL, NULL,			/* filter, filterarg */
213 	    DESC_SIZE, 1, 		/* maxsize, nsegments */
214 	    DESC_SIZE,			/* maxsegsize */
215 	    0,				/* flags */
216 	    NULL, NULL,			/* lockfunc, lockarg */
217 	    &sc->desc_tag);
218 	if (error != 0) {
219 		device_printf(sc->dev,
220 		    "could not create ring DMA tag.\n");
221 		return (1);
222 	}
223 
224 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
225 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
226 	    &sc->desc_map);
227 	if (error != 0) {
228 		device_printf(sc->dev,
229 		    "could not allocate descriptor ring.\n");
230 		return (1);
231 	}
232 
233 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
234 	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
235 	    &sc->desc_ring_paddr, 0);
236 	if (error != 0) {
237 		device_printf(sc->dev,
238 		    "could not load descriptor ring map.\n");
239 		return (1);
240 	}
241 
242 	for (idx = 0; idx < sc->desc_count; idx++) {
243 		sc->desc_ring[idx].des0 = DES0_CH;
244 		sc->desc_ring[idx].des1 = 0;
245 		nidx = (idx + 1) % sc->desc_count;
246 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
247 		    (nidx * sizeof(struct idmac_desc));
248 	}
249 
250 	error = bus_dma_tag_create(
251 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
252 	    4096, 0,			/* alignment, boundary */
253 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
254 	    BUS_SPACE_MAXADDR,		/* highaddr */
255 	    NULL, NULL,			/* filter, filterarg */
256 	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
257 	    sc->desc_count,		/* nsegments */
258 	    MMC_SECTOR_SIZE,		/* maxsegsize */
259 	    0,				/* flags */
260 	    NULL, NULL,			/* lockfunc, lockarg */
261 	    &sc->buf_tag);
262 	if (error != 0) {
263 		device_printf(sc->dev,
264 		    "could not create ring DMA tag.\n");
265 		return (1);
266 	}
267 
268 	error = bus_dmamap_create(sc->buf_tag, 0,
269 	    &sc->buf_map);
270 	if (error != 0) {
271 		device_printf(sc->dev,
272 		    "could not create TX buffer DMA map.\n");
273 		return (1);
274 	}
275 
276 	return (0);
277 }
278 
279 static void
280 dwmmc_cmd_done(struct dwmmc_softc *sc)
281 {
282 	struct mmc_command *cmd;
283 
284 	cmd = sc->curcmd;
285 	if (cmd == NULL)
286 		return;
287 
288 	if (cmd->flags & MMC_RSP_PRESENT) {
289 		if (cmd->flags & MMC_RSP_136) {
290 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
291 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
292 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
293 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
294 		} else {
295 			cmd->resp[3] = 0;
296 			cmd->resp[2] = 0;
297 			cmd->resp[1] = 0;
298 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
299 		}
300 	}
301 }
302 
303 static void
304 dwmmc_tasklet(struct dwmmc_softc *sc)
305 {
306 	struct mmc_command *cmd;
307 
308 	cmd = sc->curcmd;
309 	if (cmd == NULL)
310 		return;
311 
312 	if (!sc->cmd_done)
313 		return;
314 
315 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
316 		dwmmc_next_operation(sc);
317 	} else if (cmd->data && sc->dto_rcvd) {
318 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
319 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
320 		     sc->use_auto_stop) {
321 			if (sc->acd_rcvd)
322 				dwmmc_next_operation(sc);
323 		} else {
324 			dwmmc_next_operation(sc);
325 		}
326 	}
327 }
328 
329 static void
330 dwmmc_intr(void *arg)
331 {
332 	struct mmc_command *cmd;
333 	struct dwmmc_softc *sc;
334 	uint32_t reg;
335 
336 	sc = arg;
337 
338 	DWMMC_LOCK(sc);
339 
340 	cmd = sc->curcmd;
341 
342 	/* First handle SDMMC controller interrupts */
343 	reg = READ4(sc, SDMMC_MINTSTS);
344 	if (reg) {
345 		dprintf("%s 0x%08x\n", __func__, reg);
346 
347 		if (reg & DWMMC_CMD_ERR_FLAGS) {
348 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
349 				reg, cmd->opcode);
350 			cmd->error = MMC_ERR_TIMEOUT;
351 		}
352 
353 		if (reg & DWMMC_DATA_ERR_FLAGS) {
354 			dprintf("data err 0x%08x cmd 0x%08x\n",
355 				reg, cmd->opcode);
356 			cmd->error = MMC_ERR_FAILED;
357 			if (!sc->use_pio) {
358 				dma_done(sc, cmd);
359 				dma_stop(sc);
360 			}
361 		}
362 
363 		if (reg & SDMMC_INTMASK_CMD_DONE) {
364 			dwmmc_cmd_done(sc);
365 			sc->cmd_done = 1;
366 		}
367 
368 		if (reg & SDMMC_INTMASK_ACD)
369 			sc->acd_rcvd = 1;
370 
371 		if (reg & SDMMC_INTMASK_DTO)
372 			sc->dto_rcvd = 1;
373 
374 		if (reg & SDMMC_INTMASK_CD) {
375 			/* XXX: Handle card detect */
376 		}
377 	}
378 
379 	/* Ack interrupts */
380 	WRITE4(sc, SDMMC_RINTSTS, reg);
381 
382 	if (sc->use_pio) {
383 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
384 			pio_read(sc, cmd);
385 		}
386 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
387 			pio_write(sc, cmd);
388 		}
389 	} else {
390 		/* Now handle DMA interrupts */
391 		reg = READ4(sc, SDMMC_IDSTS);
392 		if (reg) {
393 			dprintf("dma intr 0x%08x\n", reg);
394 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
395 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
396 							 SDMMC_IDINTEN_RI));
397 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
398 				dma_done(sc, cmd);
399 			}
400 		}
401 	}
402 
403 	dwmmc_tasklet(sc);
404 
405 	DWMMC_UNLOCK(sc);
406 }
407 
408 static int
409 parse_fdt(struct dwmmc_softc *sc)
410 {
411 	pcell_t dts_value[3];
412 	phandle_t node;
413 	uint32_t bus_hz = 0, bus_width;
414 	int len;
415 #ifdef EXT_RESOURCES
416 	int error;
417 #endif
418 
419 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
420 		return (ENXIO);
421 
422 	/* bus-width */
423 	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
424 		bus_width = 4;
425 	if (bus_width >= 4)
426 		sc->host.caps |= MMC_CAP_4_BIT_DATA;
427 	if (bus_width >= 8)
428 		sc->host.caps |= MMC_CAP_8_BIT_DATA;
429 
430 	/* max-frequency */
431 	if (OF_getencprop(node, "max-frequency", &sc->max_hz, sizeof(uint32_t)) <= 0)
432 		sc->max_hz = 200000000;
433 
434 	/* fifo-depth */
435 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
436 		OF_getencprop(node, "fifo-depth", dts_value, len);
437 		sc->fifo_depth = dts_value[0];
438 	}
439 
440 	/* num-slots (Deprecated) */
441 	sc->num_slots = 1;
442 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
443 		device_printf(sc->dev, "num-slots property is deprecated\n");
444 		OF_getencprop(node, "num-slots", dts_value, len);
445 		sc->num_slots = dts_value[0];
446 	}
447 
448 	/* clock-frequency */
449 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
450 		OF_getencprop(node, "clock-frequency", dts_value, len);
451 		bus_hz = dts_value[0];
452 	}
453 
454 #ifdef EXT_RESOURCES
455 	/* BIU (Bus Interface Unit clock) is optional */
456 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
457 	if (sc->biu) {
458 		error = clk_enable(sc->biu);
459 		if (error != 0) {
460 			device_printf(sc->dev, "cannot enable biu clock\n");
461 			goto fail;
462 		}
463 	}
464 
465 	/*
466 	 * CIU (Controller Interface Unit clock) is mandatory
467 	 * if no clock-frequency property is given
468 	 */
469 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
470 	if (sc->ciu) {
471 		error = clk_enable(sc->ciu);
472 		if (error != 0) {
473 			device_printf(sc->dev, "cannot enable ciu clock\n");
474 			goto fail;
475 		}
476 		if (bus_hz != 0) {
477 			error = clk_set_freq(sc->ciu, bus_hz, 0);
478 			if (error != 0)
479 				device_printf(sc->dev,
480 				    "cannot set ciu clock to %u\n", bus_hz);
481 		}
482 		clk_get_freq(sc->ciu, &sc->bus_hz);
483 	}
484 #endif /* EXT_RESOURCES */
485 
486 	if (sc->bus_hz == 0) {
487 		device_printf(sc->dev, "No bus speed provided\n");
488 		goto fail;
489 	}
490 
491 	return (0);
492 
493 fail:
494 	return (ENXIO);
495 }
496 
497 int
498 dwmmc_attach(device_t dev)
499 {
500 	struct dwmmc_softc *sc;
501 	int error;
502 	int slot;
503 
504 	sc = device_get_softc(dev);
505 
506 	sc->dev = dev;
507 
508 	/* Why not to use Auto Stop? It save a hundred of irq per second */
509 	sc->use_auto_stop = 1;
510 
511 	error = parse_fdt(sc);
512 	if (error != 0) {
513 		device_printf(dev, "Can't get FDT property.\n");
514 		return (ENXIO);
515 	}
516 
517 	DWMMC_LOCK_INIT(sc);
518 
519 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
520 		device_printf(dev, "could not allocate resources\n");
521 		return (ENXIO);
522 	}
523 
524 	/* Setup interrupt handler. */
525 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
526 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
527 	if (error != 0) {
528 		device_printf(dev, "could not setup interrupt handler.\n");
529 		return (ENXIO);
530 	}
531 
532 	device_printf(dev, "Hardware version ID is %04x\n",
533 		READ4(sc, SDMMC_VERID) & 0xffff);
534 
535 	if (sc->desc_count == 0)
536 		sc->desc_count = DESC_MAX;
537 
538 	/* XXX: we support operation for slot index 0 only */
539 	slot = 0;
540 	if (sc->pwren_inverted) {
541 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
542 	} else {
543 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
544 	}
545 
546 	/* Reset all */
547 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
548 				  SDMMC_CTRL_FIFO_RESET |
549 				  SDMMC_CTRL_DMA_RESET)))
550 		return (ENXIO);
551 
552 	dwmmc_setup_bus(sc, sc->host.f_min);
553 
554 	if (sc->fifo_depth == 0) {
555 		sc->fifo_depth = 1 +
556 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
557 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
558 		    sc->fifo_depth);
559 	}
560 
561 	if (!sc->use_pio) {
562 		if (dma_setup(sc))
563 			return (ENXIO);
564 
565 		/* Install desc base */
566 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
567 
568 		/* Enable DMA interrupts */
569 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
570 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
571 					   SDMMC_IDINTEN_RI |
572 					   SDMMC_IDINTEN_TI));
573 	}
574 
575 	/* Clear and disable interrups for a while */
576 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
577 	WRITE4(sc, SDMMC_INTMASK, 0);
578 
579 	/* Maximum timeout */
580 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
581 
582 	/* Enable interrupts */
583 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
584 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
585 				   SDMMC_INTMASK_DTO |
586 				   SDMMC_INTMASK_ACD |
587 				   SDMMC_INTMASK_TXDR |
588 				   SDMMC_INTMASK_RXDR |
589 				   DWMMC_ERR_FLAGS |
590 				   SDMMC_INTMASK_CD));
591 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
592 
593 	sc->host.f_min = 400000;
594 	sc->host.f_max = sc->max_hz;
595 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
596 	sc->host.caps |= MMC_CAP_HSPEED;
597 	sc->host.caps |= MMC_CAP_SIGNALING_330;
598 
599 	device_add_child(dev, "mmc", -1);
600 	return (bus_generic_attach(dev));
601 }
602 
603 static int
604 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
605 {
606 	int tout;
607 	int div;
608 
609 	if (freq == 0) {
610 		WRITE4(sc, SDMMC_CLKENA, 0);
611 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
612 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
613 
614 		tout = 1000;
615 		do {
616 			if (tout-- < 0) {
617 				device_printf(sc->dev, "Failed update clk\n");
618 				return (1);
619 			}
620 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
621 
622 		return (0);
623 	}
624 
625 	WRITE4(sc, SDMMC_CLKENA, 0);
626 	WRITE4(sc, SDMMC_CLKSRC, 0);
627 
628 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
629 
630 	WRITE4(sc, SDMMC_CLKDIV, div);
631 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
632 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
633 
634 	tout = 1000;
635 	do {
636 		if (tout-- < 0) {
637 			device_printf(sc->dev, "Failed to update clk");
638 			return (1);
639 		}
640 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
641 
642 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
643 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
644 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
645 
646 	tout = 1000;
647 	do {
648 		if (tout-- < 0) {
649 			device_printf(sc->dev, "Failed to enable clk\n");
650 			return (1);
651 		}
652 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
653 
654 	return (0);
655 }
656 
657 static int
658 dwmmc_update_ios(device_t brdev, device_t reqdev)
659 {
660 	struct dwmmc_softc *sc;
661 	struct mmc_ios *ios;
662 	uint32_t reg;
663 	int ret = 0;
664 
665 	sc = device_get_softc(brdev);
666 	ios = &sc->host.ios;
667 
668 	dprintf("Setting up clk %u bus_width %d\n",
669 		ios->clock, ios->bus_width);
670 
671 	if (ios->bus_width == bus_width_8)
672 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
673 	else if (ios->bus_width == bus_width_4)
674 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
675 	else
676 		WRITE4(sc, SDMMC_CTYPE, 0);
677 
678 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
679 		/* XXX: take care about DDR or SDR use here */
680 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
681 	}
682 
683 	/* Set DDR mode */
684 	reg = READ4(sc, SDMMC_UHS_REG);
685 	if (ios->timing == bus_timing_uhs_ddr50 ||
686 	    ios->timing == bus_timing_mmc_ddr52 ||
687 	    ios->timing == bus_timing_mmc_hs400)
688 		reg |= (SDMMC_UHS_REG_DDR);
689 	else
690 		reg &= ~(SDMMC_UHS_REG_DDR);
691 	WRITE4(sc, SDMMC_UHS_REG, reg);
692 
693 	if (sc->update_ios)
694 		ret = sc->update_ios(sc, ios);
695 
696 	dwmmc_setup_bus(sc, ios->clock);
697 
698 	return (ret);
699 }
700 
701 static int
702 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
703 {
704 	struct mmc_data *data;
705 
706 	data = cmd->data;
707 
708 	if (data->flags & MMC_DATA_WRITE)
709 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
710 			BUS_DMASYNC_POSTWRITE);
711 	else
712 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
713 			BUS_DMASYNC_POSTREAD);
714 
715 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
716 	    BUS_DMASYNC_POSTWRITE);
717 
718 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
719 
720 	return (0);
721 }
722 
723 static int
724 dma_stop(struct dwmmc_softc *sc)
725 {
726 	int reg;
727 
728 	reg = READ4(sc, SDMMC_CTRL);
729 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
730 	reg |= (SDMMC_CTRL_DMA_RESET);
731 	WRITE4(sc, SDMMC_CTRL, reg);
732 
733 	reg = READ4(sc, SDMMC_BMOD);
734 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
735 	reg |= (SDMMC_BMOD_SWR);
736 	WRITE4(sc, SDMMC_BMOD, reg);
737 
738 	return (0);
739 }
740 
/*
 * Prepare an internal-DMA (IDMAC) transfer for 'cmd': mask the PIO
 * FIFO-watermark interrupts, map the data buffer into the descriptor
 * ring (via the dwmmc_ring_setup callback), sync the maps, program the
 * FIFO thresholds and hand the transfer to the IDMAC.  Always returns
 * 0; a buffer mapping failure panics.
 *
 * NOTE(review): the writes below are an ordered MMIO bring-up sequence
 * (FIFOTH -> CTRL -> BMOD -> PLDMND, fenced with wmb()); do not reorder.
 */
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	/* TXDR/RXDR are PIO-only; mask them while DMA is in charge. */
	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Pre-transfer sync in the direction of the data flow. */
	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	/* Burst size plus RX/TX watermarks at half the FIFO depth. */
	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	/* Route the transfer through the internal DMA controller. */
	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	/* Enable the IDMAC with fixed-burst mode. */
	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}
792 
793 static int
794 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
795 {
796 	struct mmc_data *data;
797 	int reg;
798 
799 	data = cmd->data;
800 	data->xfer_len = 0;
801 
802 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
803 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
804 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
805 
806 	WRITE4(sc, SDMMC_FIFOTH, reg);
807 	wmb();
808 
809 	return (0);
810 }
811 
812 static void
813 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
814 {
815 	struct mmc_data *data;
816 	uint32_t *p, status;
817 
818 	if (cmd == NULL || cmd->data == NULL)
819 		return;
820 
821 	data = cmd->data;
822 	if ((data->flags & MMC_DATA_READ) == 0)
823 		return;
824 
825 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
826 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
827 
828 	while (data->xfer_len < data->len) {
829 		status = READ4(sc, SDMMC_STATUS);
830 		if (status & SDMMC_STATUS_FIFO_EMPTY)
831 			break;
832 		*p++ = READ4(sc, SDMMC_DATA);
833 		data->xfer_len += 4;
834 	}
835 
836 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
837 }
838 
839 static void
840 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
841 {
842 	struct mmc_data *data;
843 	uint32_t *p, status;
844 
845 	if (cmd == NULL || cmd->data == NULL)
846 		return;
847 
848 	data = cmd->data;
849 	if ((data->flags & MMC_DATA_WRITE) == 0)
850 		return;
851 
852 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
853 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
854 
855 	while (data->xfer_len < data->len) {
856 		status = READ4(sc, SDMMC_STATUS);
857 		if (status & SDMMC_STATUS_FIFO_FULL)
858 			break;
859 		WRITE4(sc, SDMMC_DATA, *p++);
860 		data->xfer_len += 4;
861 	}
862 
863 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
864 }
865 
866 static void
867 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
868 {
869 	struct mmc_data *data;
870 	uint32_t blksz;
871 	uint32_t cmdr;
872 
873 	sc->curcmd = cmd;
874 	data = cmd->data;
875 
876 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
877 		dwmmc_setup_bus(sc, sc->host.ios.clock);
878 
879 	/* XXX Upper layers don't always set this */
880 	cmd->mrq = sc->req;
881 
882 	/* Begin setting up command register. */
883 
884 	cmdr = cmd->opcode;
885 
886 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
887 
888 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
889 	    cmd->opcode == MMC_GO_IDLE_STATE ||
890 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
891 		cmdr |= SDMMC_CMD_STOP_ABORT;
892 	else if (cmd->opcode != MMC_SEND_STATUS && data)
893 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
894 
895 	/* Set up response handling. */
896 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
897 		cmdr |= SDMMC_CMD_RESP_EXP;
898 		if (cmd->flags & MMC_RSP_136)
899 			cmdr |= SDMMC_CMD_RESP_LONG;
900 	}
901 
902 	if (cmd->flags & MMC_RSP_CRC)
903 		cmdr |= SDMMC_CMD_RESP_CRC;
904 
905 	/*
906 	 * XXX: Not all platforms want this.
907 	 */
908 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
909 
910 	if ((sc->flags & CARD_INIT_DONE) == 0) {
911 		sc->flags |= (CARD_INIT_DONE);
912 		cmdr |= SDMMC_CMD_SEND_INIT;
913 	}
914 
915 	if (data) {
916 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
917 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
918 		     sc->use_auto_stop)
919 			cmdr |= SDMMC_CMD_SEND_ASTOP;
920 
921 		cmdr |= SDMMC_CMD_DATA_EXP;
922 		if (data->flags & MMC_DATA_STREAM)
923 			cmdr |= SDMMC_CMD_MODE_STREAM;
924 		if (data->flags & MMC_DATA_WRITE)
925 			cmdr |= SDMMC_CMD_DATA_WRITE;
926 
927 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
928 		WRITE4(sc, SDMMC_BYTCNT, data->len);
929 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
930 			 data->len : MMC_SECTOR_SIZE;
931 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
932 
933 		if (sc->use_pio) {
934 			pio_prepare(sc, cmd);
935 		} else {
936 			dma_prepare(sc, cmd);
937 		}
938 		wmb();
939 	}
940 
941 	dprintf("cmdr 0x%08x\n", cmdr);
942 
943 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
944 	wmb();
945 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
946 };
947 
/*
 * Advance the request state machine: issue the pending command first,
 * then the pending stop command (only when hardware auto-stop is not in
 * use), and finally complete the request through req->done().
 */
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	/* Reset per-command completion state for the next command. */
	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait until card is still busy.
	 * We do need this to prevent data timeouts,
	 * mostly caused by multi-block write command
	 * followed by single-read.
	 */
	/*
	 * NOTE(review): this is an unbounded busy-wait performed with the
	 * softc lock held; a card that never deasserts DATA_BUSY would
	 * hang here — consider adding a timeout.
	 */
	while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	/* Nothing left to issue: hand the finished request back. */
	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}
984 
985 static int
986 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
987 {
988 	struct dwmmc_softc *sc;
989 
990 	sc = device_get_softc(brdev);
991 
992 	dprintf("%s\n", __func__);
993 
994 	DWMMC_LOCK(sc);
995 
996 	if (sc->req != NULL) {
997 		DWMMC_UNLOCK(sc);
998 		return (EBUSY);
999 	}
1000 
1001 	sc->req = req;
1002 	sc->flags |= PENDING_CMD;
1003 	if (sc->req->stop)
1004 		sc->flags |= PENDING_STOP;
1005 	dwmmc_next_operation(sc);
1006 
1007 	DWMMC_UNLOCK(sc);
1008 	return (0);
1009 }
1010 
1011 static int
1012 dwmmc_get_ro(device_t brdev, device_t reqdev)
1013 {
1014 
1015 	dprintf("%s\n", __func__);
1016 
1017 	return (0);
1018 }
1019 
1020 static int
1021 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1022 {
1023 	struct dwmmc_softc *sc;
1024 
1025 	sc = device_get_softc(brdev);
1026 
1027 	DWMMC_LOCK(sc);
1028 	while (sc->bus_busy)
1029 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1030 	sc->bus_busy++;
1031 	DWMMC_UNLOCK(sc);
1032 	return (0);
1033 }
1034 
1035 static int
1036 dwmmc_release_host(device_t brdev, device_t reqdev)
1037 {
1038 	struct dwmmc_softc *sc;
1039 
1040 	sc = device_get_softc(brdev);
1041 
1042 	DWMMC_LOCK(sc);
1043 	sc->bus_busy--;
1044 	wakeup(sc);
1045 	DWMMC_UNLOCK(sc);
1046 	return (0);
1047 }
1048 
1049 static int
1050 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1051 {
1052 	struct dwmmc_softc *sc;
1053 
1054 	sc = device_get_softc(bus);
1055 
1056 	switch (which) {
1057 	default:
1058 		return (EINVAL);
1059 	case MMCBR_IVAR_BUS_MODE:
1060 		*(int *)result = sc->host.ios.bus_mode;
1061 		break;
1062 	case MMCBR_IVAR_BUS_WIDTH:
1063 		*(int *)result = sc->host.ios.bus_width;
1064 		break;
1065 	case MMCBR_IVAR_CHIP_SELECT:
1066 		*(int *)result = sc->host.ios.chip_select;
1067 		break;
1068 	case MMCBR_IVAR_CLOCK:
1069 		*(int *)result = sc->host.ios.clock;
1070 		break;
1071 	case MMCBR_IVAR_F_MIN:
1072 		*(int *)result = sc->host.f_min;
1073 		break;
1074 	case MMCBR_IVAR_F_MAX:
1075 		*(int *)result = sc->host.f_max;
1076 		break;
1077 	case MMCBR_IVAR_HOST_OCR:
1078 		*(int *)result = sc->host.host_ocr;
1079 		break;
1080 	case MMCBR_IVAR_MODE:
1081 		*(int *)result = sc->host.mode;
1082 		break;
1083 	case MMCBR_IVAR_OCR:
1084 		*(int *)result = sc->host.ocr;
1085 		break;
1086 	case MMCBR_IVAR_POWER_MODE:
1087 		*(int *)result = sc->host.ios.power_mode;
1088 		break;
1089 	case MMCBR_IVAR_VDD:
1090 		*(int *)result = sc->host.ios.vdd;
1091 		break;
1092 	case MMCBR_IVAR_CAPS:
1093 		*(int *)result = sc->host.caps;
1094 		break;
1095 	case MMCBR_IVAR_MAX_DATA:
1096 		*(int *)result = sc->desc_count;
1097 	}
1098 	return (0);
1099 }
1100 
1101 static int
1102 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1103 {
1104 	struct dwmmc_softc *sc;
1105 
1106 	sc = device_get_softc(bus);
1107 
1108 	switch (which) {
1109 	default:
1110 		return (EINVAL);
1111 	case MMCBR_IVAR_BUS_MODE:
1112 		sc->host.ios.bus_mode = value;
1113 		break;
1114 	case MMCBR_IVAR_BUS_WIDTH:
1115 		sc->host.ios.bus_width = value;
1116 		break;
1117 	case MMCBR_IVAR_CHIP_SELECT:
1118 		sc->host.ios.chip_select = value;
1119 		break;
1120 	case MMCBR_IVAR_CLOCK:
1121 		sc->host.ios.clock = value;
1122 		break;
1123 	case MMCBR_IVAR_MODE:
1124 		sc->host.mode = value;
1125 		break;
1126 	case MMCBR_IVAR_OCR:
1127 		sc->host.ocr = value;
1128 		break;
1129 	case MMCBR_IVAR_POWER_MODE:
1130 		sc->host.ios.power_mode = value;
1131 		break;
1132 	case MMCBR_IVAR_VDD:
1133 		sc->host.ios.vdd = value;
1134 		break;
1135 	/* These are read-only */
1136 	case MMCBR_IVAR_CAPS:
1137 	case MMCBR_IVAR_HOST_OCR:
1138 	case MMCBR_IVAR_F_MIN:
1139 	case MMCBR_IVAR_F_MAX:
1140 	case MMCBR_IVAR_MAX_DATA:
1141 		return (EINVAL);
1142 	}
1143 	return (0);
1144 }
1145 
/*
 * Method table for the generic dwmmc class.  SoC-specific front-ends
 * presumably subclass this and supply probe/attach — confirm against
 * the attachment drivers that reference dwmmc_driver.
 */
static device_method_t dwmmc_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};
1160 
/* Declare the base dwmmc driver class; instances use struct dwmmc_softc. */
DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
    sizeof(struct dwmmc_softc));
1163