/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#ifdef EXT_RESOURCES
#include <dev/extres/clk/clk.h>
#endif

#include <dev/mmc/host/dwmmc_reg.h>
#include <dev/mmc/host/dwmmc_var.h>

#include "opt_mmccam.h"

#include "mmcbr_if.h"

#define dprintf(x, arg...)

#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)		howmany(n, d)

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)
#define	DES0_LD		(1 << 2)
#define	DES0_FS		(1 << 3)
#define	DES0_CH		(1 << 4)
#define	DES0_ER		(1 << 5)
#define	DES0_CES	(1 << 30)
#define	DES0_OWN	(1 << 31)

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

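/*
 * Internal DMA controller (IDMAC) descriptor, used in chained mode:
 * des0 carries the control/status bits (OWN, first/last segment, chained),
 * des1 the buffer size, des2 the buffer physical address and des3 the
 * physical address of the next descriptor in the ring.
 */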
struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_MAX	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)

static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

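/*
 * busdma callback for the data buffer: fill one chained IDMAC descriptor per
 * DMA segment, mark the first (FS) and last (LD) segments and hand ownership
 * of each descriptor to the controller (OWN).
 */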
static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}

static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait for reset to complete */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

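/*
 * Allocate the IDMAC descriptor ring in a single coherent DMA buffer, load
 * it to learn its physical address, link the descriptors into a circular
 * chain via des3, and create the DMA tag and map used to load the data
 * buffer of each transfer.
 */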
static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1, 		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	for (idx = 0; idx < sc->desc_count; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % sc->desc_count;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
		    (nidx * sizeof(struct idmac_desc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
	    sc->desc_count,		/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create buffer DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA map.\n");
		return (1);
	}

	return (0);
}

static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}

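/*
 * Interrupt handler: read and acknowledge the masked SDMMC interrupt status,
 * record command/data completion and error state, service PIO transfers or
 * the internal DMA status, then run the tasklet to advance the request.
 */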
static void
dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			dprintf("data err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
		}

		if (reg & SDMMC_INTMASK_ACD)
			sc->acd_rcvd = 1;

		if (reg & SDMMC_INTMASK_DTO)
			sc->dto_rcvd = 1;

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
		}
	}

	/* Ack interrupts */
	WRITE4(sc, SDMMC_RINTSTS, reg);

	if (sc->use_pio) {
		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
							 SDMMC_IDINTEN_RI));
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}

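/*
 * Parse device tree properties: bus-width, max-frequency, fifo-depth, the
 * deprecated num-slots, clock-frequency and, with EXT_RESOURCES, the
 * optional "biu" and "ciu" clocks used to derive the bus clock rate.
 */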
static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	uint32_t bus_hz = 0, bus_width;
	int len;
#ifdef EXT_RESOURCES
	int error;
#endif

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* bus-width */
	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;
	if (bus_width >= 4)
		sc->host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->host.caps |= MMC_CAP_8_BIT_DATA;

	/* max-frequency */
	if (OF_getencprop(node, "max-frequency", &sc->max_hz, sizeof(uint32_t)) <= 0)
		sc->max_hz = 200000000;

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
		OF_getencprop(node, "fifo-depth", dts_value, len);
		sc->fifo_depth = dts_value[0];
	}

	/* num-slots (Deprecated) */
	sc->num_slots = 1;
	if ((len = OF_getproplen(node, "num-slots")) > 0) {
		device_printf(sc->dev, "num-slots property is deprecated\n");
		OF_getencprop(node, "num-slots", dts_value, len);
		sc->num_slots = dts_value[0];
	}

	/* clock-frequency */
	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
		OF_getencprop(node, "clock-frequency", dts_value, len);
		bus_hz = dts_value[0];
	}

#ifdef EXT_RESOURCES
	/* BIU (Bus Interface Unit clock) is optional */
	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
	if (sc->biu) {
		error = clk_enable(sc->biu);
		if (error != 0) {
			device_printf(sc->dev, "cannot enable biu clock\n");
			goto fail;
		}
	}

	/*
	 * CIU (Controller Interface Unit clock) is mandatory
	 * if no clock-frequency property is given
	 */
	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
	if (sc->ciu) {
		error = clk_enable(sc->ciu);
		if (error != 0) {
			device_printf(sc->dev, "cannot enable ciu clock\n");
			goto fail;
		}
		if (bus_hz != 0) {
			error = clk_set_freq(sc->ciu, bus_hz, 0);
			if (error != 0)
				device_printf(sc->dev,
				    "cannot set ciu clock to %u\n", bus_hz);
		}
		clk_get_freq(sc->ciu, &sc->bus_hz);
	}
#endif /* EXT_RESOURCES */

	if (sc->bus_hz == 0) {
		device_printf(sc->dev, "No bus speed provided\n");
		goto fail;
	}

	return (0);

fail:
	return (ENXIO);
}

int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;

	/* Use Auto Stop: it saves about a hundred interrupts per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
		READ4(sc, SDMMC_VERID) & 0xffff);

	if (sc->desc_count == 0)
		sc->desc_count = DESC_MAX;

	/* XXX: only slot index 0 is supported */
	slot = 0;
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
				  SDMMC_CTRL_FIFO_RESET |
				  SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
					   SDMMC_IDINTEN_RI |
					   SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
				   SDMMC_INTMASK_DTO |
				   SDMMC_INTMASK_ACD |
				   SDMMC_INTMASK_TXDR |
				   SDMMC_INTMASK_RXDR |
				   DWMMC_ERR_FLAGS |
				   SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	sc->host.f_min = 400000;
	sc->host.f_max = sc->max_hz;
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps |= MMC_CAP_HSPEED;
	sc->host.caps |= MMC_CAP_SIGNALING_330;

	device_add_child(dev, "mmc", -1);
	return (bus_generic_attach(dev));
}

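/*
 * Program the card clock.  A zero frequency just gates the clock off.
 * Otherwise CLKDIV is set to howmany(bus_hz, 2 * freq) (a divider value of n
 * yields bus_hz / (2 * n), 0 means bypass), and every change is latched by
 * issuing an update-clocks-only command and polling until START clears.
 */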
static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev, "Failed to update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);

	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t reg;
	int ret = 0;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
		ios->clock, ios->bus_width);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take care about DDR or SDR use here */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/* Set DDR mode */
	reg = READ4(sc, SDMMC_UHS_REG);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52 ||
	    ios->timing == bus_timing_mmc_hs400)
		reg |= (SDMMC_UHS_REG_DDR);
	else
		reg &= ~(SDMMC_UHS_REG_DDR);
	WRITE4(sc, SDMMC_UHS_REG, reg);

	if (sc->update_ios)
		ret = sc->update_ios(sc, ios);

	dwmmc_setup_bus(sc, ios->clock);

	return (ret);
}

static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTREAD);

	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

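/*
 * Prepare an IDMAC transfer: mask the PIO FIFO interrupts, load the data
 * buffer into the buffer DMA map (dwmmc_ring_setup fills the descriptor
 * chain), sync the maps, program the FIFO watermarks, enable the internal
 * DMA engine and write the poll-demand register so descriptor fetching
 * starts.
 */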
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

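/*
 * Build the SDMMC command word for a request: stop/abort or
 * wait-for-previous-data as appropriate, response length and CRC flags,
 * auto-stop for multi-block transfers and the data direction.  For data
 * commands, byte count, block size and the PIO/DMA engine are set up before
 * the command is started.
 */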
static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	sc->curcmd = cmd;
	data = cmd->data;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
		dwmmc_setup_bus(sc, sc->host.ios.clock);

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
		WRITE4(sc, SDMMC_BYTCNT, data->len);
		blksz = (data->len < MMC_SECTOR_SIZE) ? \
			 data->len : MMC_SECTOR_SIZE;
		WRITE4(sc, SDMMC_BLKSIZ, blksz);

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

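/*
 * Advance the current request: issue the pending command first, then the
 * pending stop command (unless the controller sends it automatically), and
 * finally complete the request and notify the caller.
 */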
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is busy.
	 * We need this to prevent data timeouts,
	 * mostly caused by a multi-block write command
	 * followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}

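/*
 * mmcbr request entry point.  Only one request is handled at a time: record
 * the pending command (and stop command, if any) and kick the state machine.
 */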
static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = sc->desc_count;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->host.ios.timing;
		break;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->host.ios.timing = value;
		break;

	/* Not handled */
	case MMCBR_IVAR_VCCQ:
		return (0);
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

static device_method_t dwmmc_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
    sizeof(struct dwmmc_softc));