xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 907b59d76938e654f0d040a888e8dfca3de1e222)
/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <dev/mmc/host/dwmmc_reg.h>
#include <dev/mmc/host/dwmmc_var.h>

#include "mmcbr_if.h"

#define dprintf(x, arg...)

#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)		howmany(n, d)

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)
#define	DES0_LD		(1 << 2)
#define	DES0_FS		(1 << 3)
#define	DES0_CH		(1 << 4)
#define	DES0_ER		(1 << 5)
#define	DES0_CES	(1 << 30)
#define	DES0_OWN	(1 << 31)

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_MAX	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)

static struct ofw_compat_data compat_data[] = {
	{"altr,socfpga-dw-mshc",	HWTYPE_ALTERA},
	{"samsung,exynos5420-dw-mshc",	HWTYPE_EXYNOS},
	{"rockchip,rk2928-dw-mshc",	HWTYPE_ROCKCHIP},
	{NULL,				HWTYPE_NONE},
};

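/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment backing the descriptor ring.
 */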
static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

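/*
 * bus_dmamap_load() callback: fill the IDMAC descriptor chain with the
 * buffer segments, marking the first and last descriptors.
 */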
static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}

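/*
 * Assert the given reset bits in SDMMC_CTRL and poll until the
 * controller clears them.  Returns 0 on success, 1 on timeout.
 */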
static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait for the reset to complete. */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

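/*
 * Allocate and load the IDMAC descriptor ring, link the descriptors
 * into a circular chain, and create the DMA tag and map used for data
 * buffers.
 */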
static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up the descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1, 		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create descriptor ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	for (idx = 0; idx < sc->desc_count; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % sc->desc_count;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr +
		    (nidx * sizeof(struct idmac_desc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
	    sc->desc_count,		/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create buffer DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create buffer DMA map.\n");
		return (1);
	}

	return (0);
}

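/*
 * Copy the short or long response from the SDMMC_RESP registers into
 * the current command.
 */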
static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

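/*
 * Advance the state machine once the current command has completed:
 * commands without data (or with an error) finish immediately, data
 * commands wait for DTO and, with Auto Stop, for ACD as well.
 */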
static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}

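/*
 * Interrupt handler: acknowledge controller and IDMAC interrupts,
 * record command/data completion and errors, service PIO FIFO events,
 * then advance the state machine.
 */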
static void
dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
			dprintf("data err 0x%08x cmd 0x%08x\n",
				reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
		}

		if (reg & SDMMC_INTMASK_ACD) {
			sc->acd_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
		}

		if (reg & SDMMC_INTMASK_DTO) {
			sc->dto_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
		}

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
		}
	}

	if (sc->use_pio) {
		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
							 SDMMC_IDINTEN_RI));
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}

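/*
 * Read fifo-depth, num-slots and bus-frequency from the FDT node,
 * plus the Exynos-specific CIU divider and SDR/DDR timing properties.
 */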
static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	int len;

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
		OF_getencprop(node, "fifo-depth", dts_value, len);
		sc->fifo_depth = dts_value[0];
	}

	/* num-slots */
	sc->num_slots = 1;
	if ((len = OF_getproplen(node, "num-slots")) > 0) {
		OF_getencprop(node, "num-slots", dts_value, len);
		sc->num_slots = dts_value[0];
	}

	/*
	 * We need platform-specific code to know what clock is
	 * supplied to our device.  For now, rely on the value
	 * specified in the FDT.
	 */
	if (sc->bus_hz == 0) {
		if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
			return (ENXIO);
		OF_getencprop(node, "bus-frequency", dts_value, len);
		sc->bus_hz = dts_value[0];
	}

	/*
	 * Platform-specific properties.
	 * XXX: Move to a separate file.
	 */

	if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
		return (0);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
	sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
	sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
	sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
			  (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
	sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
			  (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	return (0);
}

static int
dwmmc_probe(device_t dev)
{
	uintptr_t hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Synopsys DesignWare Mobile "
				"Storage Host Controller");
	return (BUS_PROBE_DEFAULT);
}

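/*
 * Attach: allocate bus resources, hook the interrupt, reset the
 * controller, set up DMA (or PIO) operation and interrupts, then
 * attach the mmc child bus.
 */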
int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;
	if (sc->hwtype == HWTYPE_NONE) {
		sc->hwtype =
		    ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	}

	/*
	 * Use Auto Stop by default; it saves hundreds of interrupts
	 * per second.
	 */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Set up the interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
		READ4(sc, SDMMC_VERID) & 0xffff);

	if (sc->desc_count == 0)
		sc->desc_count = DESC_MAX;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
		sc->use_pio = 1;
		sc->pwren_inverted = 1;
	} else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		WRITE4(sc, EMMCP_MPSBEGIN0, 0);
		WRITE4(sc, EMMCP_SEND0, 0);
		WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
					 MPSCTRL_SECURE_WRITE_BIT |
					 MPSCTRL_NON_SECURE_READ_BIT |
					 MPSCTRL_NON_SECURE_WRITE_BIT |
					 MPSCTRL_VALID));
	}

	/* XXX: only slot index 0 is supported. */
	slot = 0;
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
				  SDMMC_CTRL_FIFO_RESET |
				  SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install descriptor ring base address. */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
					   SDMMC_IDINTEN_RI |
					   SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
				   SDMMC_INTMASK_DTO |
				   SDMMC_INTMASK_ACD |
				   SDMMC_INTMASK_TXDR |
				   SDMMC_INTMASK_RXDR |
				   DWMMC_ERR_FLAGS |
				   SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	sc->host.f_min = 400000;
	sc->host.f_max = min(200000000, sc->bus_hz);
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_4_BIT_DATA;

	device_add_child(dev, "mmc", -1);
	return (bus_generic_attach(dev));
}

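/*
 * Program the card clock: disable it, set the divider for the
 * requested frequency and re-enable it, pushing each step to the CIU
 * with an update-clock-only command.
 */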
static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev, "Failed to update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);

	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

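/*
 * mmcbr update_ios hook: apply the requested clock and bus width, and
 * the Exynos CLKSEL timing when applicable.
 */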
static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
		ios->clock, ios->bus_width);

	dwmmc_setup_bus(sc, ios->clock);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: select DDR or SDR timing here as appropriate */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/*
	 * XXX: handle the DDR bit:
	 *
	 * reg = READ4(sc, SDMMC_UHS_REG);
	 * reg |= (SDMMC_UHS_REG_DDR);
	 * WRITE4(sc, SDMMC_UHS_REG, reg);
	 */

	return (0);
}

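/*
 * Sync and unload the data buffer DMA map once a transfer has
 * completed.
 */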
static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTREAD);

	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

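/*
 * Disable the internal DMA controller and issue DMA and BMOD software
 * resets.
 */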
static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

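/*
 * Map the data buffer into the descriptor ring, sync the DMA maps,
 * program the FIFO thresholds, enable the IDMAC and kick it with a
 * poll demand.
 */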
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int len;
	int err;
	int reg;

	data = cmd->data;
	len = data->len;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the descriptors. */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

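/*
 * Reset the PIO transfer offset and program the FIFO thresholds.
 */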
static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

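/*
 * Drain the data FIFO into the request buffer, one 32-bit word at a
 * time, until the FIFO is empty or the transfer is complete.
 */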
static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

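/*
 * Fill the data FIFO from the request buffer, one 32-bit word at a
 * time, until the FIFO is full or the transfer is complete.
 */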
static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

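/*
 * Build the CMD register value (response, CRC, data and auto-stop
 * flags), program the byte count and block size, prepare DMA or PIO,
 * and start the command.
 */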
static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	sc->curcmd = cmd;
	data = cmd->data;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
		dwmmc_setup_bus(sc, sc->host.ios.clock);

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
		WRITE4(sc, SDMMC_BYTCNT, data->len);
		blksz = (data->len < MMC_SECTOR_SIZE) ?
			 data->len : MMC_SECTOR_SIZE;
		WRITE4(sc, SDMMC_BLKSIZ, blksz);

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

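/*
 * Issue the next pending command (or stop command) of the current
 * request, or complete the request if nothing is pending.
 */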
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is still busy.
	 * We need this to prevent data timeouts, mostly caused by a
	 * multi-block write command followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}

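/*
 * mmcbr request hook: accept a single outstanding request and kick
 * off its first command.
 */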
static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = sc->desc_count;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

static device_method_t dwmmc_methods[] = {
	DEVMETHOD(device_probe,		dwmmc_probe),
	DEVMETHOD(device_attach,	dwmmc_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

driver_t dwmmc_driver = {
	"dwmmc",
	dwmmc_methods,
	sizeof(struct dwmmc_softc),
};

static devclass_t dwmmc_devclass;

DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);
DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, 0, 0);
DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(dwmmc, mmc, 1, 1, 1);