xref: /freebsd/sys/arm/allwinner/aw_mmc.c (revision e08e9e999091f86081377b7cedc3fd2fe2ab70fc)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5  * Copyright (c) 2013 Alexander Fedorov
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/resource.h>
42 #include <sys/rman.h>
43 #include <sys/sysctl.h>
44 
45 #include <machine/bus.h>
46 
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49 
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 
53 #include <arm/allwinner/aw_mmc.h>
54 #include <dev/extres/clk/clk.h>
55 #include <dev/extres/hwreset/hwreset.h>
56 #include <dev/extres/regulator/regulator.h>
57 
58 #define	AW_MMC_MEMRES		0
59 #define	AW_MMC_IRQRES		1
60 #define	AW_MMC_RESSZ		2
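/*
 * Enough DMA segments to cover a MAXPHYS-sized transfer one page at a time
 * (plus one segment for an unaligned start).  AW_MMC_DMA_FTRGLEVEL is the
 * FIFO water level later written to AW_MMC_FWLR; the exact value is a magic
 * constant.
 */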
61 #define	AW_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
62 #define	AW_MMC_DMA_FTRGLEVEL	0x20070008
63 #define	AW_MMC_RESET_RETRY	1000
64 
65 #define	CARD_ID_FREQUENCY	400000
66 
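/*
 * Per-SoC configuration: maximum bytes per DMA descriptor, whether DATA0 must
 * be masked while the card clock is being reprogrammed, whether the sample
 * delay register can be used for calibration, and whether the "new timing"
 * mode is available.
 */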
67 struct aw_mmc_conf {
68 	uint32_t	dma_xferlen;
69 	bool		mask_data0;
70 	bool		can_calibrate;
71 	bool		new_timing;
72 };
73 
74 static const struct aw_mmc_conf a10_mmc_conf = {
75 	.dma_xferlen = 0x2000,
76 };
77 
78 static const struct aw_mmc_conf a13_mmc_conf = {
79 	.dma_xferlen = 0x10000,
80 };
81 
82 static const struct aw_mmc_conf a64_mmc_conf = {
83 	.dma_xferlen = 0x10000,
84 	.mask_data0 = true,
85 	.can_calibrate = true,
86 	.new_timing = true,
87 };
88 
89 static const struct aw_mmc_conf a64_emmc_conf = {
90 	.dma_xferlen = 0x2000,
91 	.can_calibrate = true,
92 };
93 
94 static struct ofw_compat_data compat_data[] = {
95 	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
96 	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
97 	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
98 	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
99 	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
100 	{NULL,             0}
101 };
102 
103 struct aw_mmc_softc {
104 	device_t		aw_dev;
105 	clk_t			aw_clk_ahb;
106 	clk_t			aw_clk_mmc;
107 	hwreset_t		aw_rst_ahb;
108 	int			aw_bus_busy;
109 	int			aw_resid;
110 	int			aw_timeout;
111 	struct callout		aw_timeoutc;
112 	struct mmc_host		aw_host;
113 	struct mmc_request *	aw_req;
114 	struct mtx		aw_mtx;
115 	struct resource *	aw_res[AW_MMC_RESSZ];
116 	struct aw_mmc_conf *	aw_mmc_conf;
117 	uint32_t		aw_intr;
118 	uint32_t		aw_intr_wait;
119 	void *			aw_intrhand;
120 	int32_t			aw_vdd;
121 	int32_t			aw_vccq;
122 	regulator_t		aw_reg_vmmc;
123 	regulator_t		aw_reg_vqmmc;
124 	unsigned int		aw_clock;
125 
126 	/* Fields required for DMA access. */
127 	bus_addr_t	  	aw_dma_desc_phys;
128 	bus_dmamap_t		aw_dma_map;
129 	bus_dma_tag_t 		aw_dma_tag;
130 	void * 			aw_dma_desc;
131 	bus_dmamap_t		aw_dma_buf_map;
132 	bus_dma_tag_t		aw_dma_buf_tag;
133 	int			aw_dma_map_err;
134 };
135 
136 static struct resource_spec aw_mmc_res_spec[] = {
137 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
138 	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
139 	{ -1,			0,	0 }
140 };
141 
142 static int aw_mmc_probe(device_t);
143 static int aw_mmc_attach(device_t);
144 static int aw_mmc_detach(device_t);
145 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
146 static int aw_mmc_reset(struct aw_mmc_softc *);
147 static int aw_mmc_init(struct aw_mmc_softc *);
148 static void aw_mmc_intr(void *);
149 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
150 
151 static int aw_mmc_update_ios(device_t, device_t);
152 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
153 static int aw_mmc_get_ro(device_t, device_t);
154 static int aw_mmc_acquire_host(device_t, device_t);
155 static int aw_mmc_release_host(device_t, device_t);
156 
157 #define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
158 #define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
159 #define	AW_MMC_READ_4(_sc, _reg)					\
160 	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
161 #define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
162 	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
163 
164 static int
165 aw_mmc_probe(device_t dev)
166 {
167 
168 	if (!ofw_bus_status_okay(dev))
169 		return (ENXIO);
170 	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
171 		return (ENXIO);
172 
173 	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
174 
175 	return (BUS_PROBE_DEFAULT);
176 }
177 
178 static int
179 aw_mmc_attach(device_t dev)
180 {
181 	device_t child;
182 	struct aw_mmc_softc *sc;
183 	struct sysctl_ctx_list *ctx;
184 	struct sysctl_oid_list *tree;
185 	uint32_t bus_width;
186 	phandle_t node;
187 	int error;
188 
189 	node = ofw_bus_get_node(dev);
190 	sc = device_get_softc(dev);
191 	sc->aw_dev = dev;
192 
193 	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
194 
195 	sc->aw_req = NULL;
196 	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
197 		device_printf(dev, "cannot allocate device resources\n");
198 		return (ENXIO);
199 	}
200 	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
201 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
202 	    &sc->aw_intrhand)) {
203 		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
204 		device_printf(dev, "cannot setup interrupt handler\n");
205 		return (ENXIO);
206 	}
207 	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
208 	    MTX_DEF);
209 	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
210 
211 	/* De-assert reset */
212 	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
213 		error = hwreset_deassert(sc->aw_rst_ahb);
214 		if (error != 0) {
215 			device_printf(dev, "cannot de-assert reset\n");
216 			goto fail;
217 		}
218 	}
219 
220 	/* Activate the bus (ahb) and module (mmc) clocks. */
221 	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
222 	if (error != 0) {
223 		device_printf(dev, "cannot get ahb clock\n");
224 		goto fail;
225 	}
226 	error = clk_enable(sc->aw_clk_ahb);
227 	if (error != 0) {
228 		device_printf(dev, "cannot enable ahb clock\n");
229 		goto fail;
230 	}
231 	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
232 	if (error != 0) {
233 		device_printf(dev, "cannot get mmc clock\n");
234 		goto fail;
235 	}
236 	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
237 	    CLK_SET_ROUND_DOWN);
238 	if (error != 0) {
239 		device_printf(dev, "cannot init mmc clock\n");
240 		goto fail;
241 	}
242 	error = clk_enable(sc->aw_clk_mmc);
243 	if (error != 0) {
244 		device_printf(dev, "cannot enable mmc clock\n");
245 		goto fail;
246 	}
247 
248 	sc->aw_timeout = 10;
249 	ctx = device_get_sysctl_ctx(dev);
250 	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
251 	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
252 	    &sc->aw_timeout, 0, "Request timeout in seconds");
253 
254 	/* Soft Reset controller. */
255 	if (aw_mmc_reset(sc) != 0) {
256 		device_printf(dev, "cannot reset the controller\n");
257 		goto fail;
258 	}
259 
260 	if (aw_mmc_setup_dma(sc) != 0) {
261 		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
262 		goto fail;
263 	}
264 
265 	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
266 		bus_width = 4;
267 
268 	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
269 	    &sc->aw_reg_vmmc) == 0) {
270 		if (bootverbose)
271 			device_printf(dev, "vmmc-supply regulator found\n");
272 	}
273 	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
274 	    &sc->aw_reg_vqmmc) == 0) {
275 		if (bootverbose)
276 			device_printf(dev, "vqmmc-supply regulator found\n");
277 	}
278 
279 	sc->aw_host.f_min = 400000;
280 	sc->aw_host.f_max = 52000000;
281 	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
282 	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
283 			   MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
284 			   MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;
285 
286 	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180;
287 
288 	if (bus_width >= 4)
289 		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
290 	if (bus_width >= 8)
291 		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;
292 
293 	child = device_add_child(dev, "mmc", -1);
294 	if (child == NULL) {
295 		device_printf(dev, "attaching MMC bus failed!\n");
296 		goto fail;
297 	}
298 	if (device_probe_and_attach(child) != 0) {
299 		device_printf(dev, "attaching MMC child failed!\n");
300 		device_delete_child(dev, child);
301 		goto fail;
302 	}
303 
304 	return (0);
305 
306 fail:
307 	callout_drain(&sc->aw_timeoutc);
308 	mtx_destroy(&sc->aw_mtx);
309 	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
310 	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
311 
312 	return (ENXIO);
313 }
314 
315 static int
316 aw_mmc_detach(device_t dev)
317 {
318 
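	/* Detaching the controller is not supported. */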
319 	return (EBUSY);
320 }
321 
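/*
 * bus_dma callback for the descriptor table: record its bus address so it
 * can later be programmed into the controller's descriptor list base
 * register (AW_MMC_DLBA).
 */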
322 static void
323 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
324 {
325 	struct aw_mmc_softc *sc;
326 
327 	sc = (struct aw_mmc_softc *)arg;
328 	if (err) {
329 		sc->aw_dma_map_err = err;
330 		return;
331 	}
332 	sc->aw_dma_desc_phys = segs[0].ds_addr;
333 }
334 
335 static int
336 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
337 {
338 	int dma_desc_size, error;
339 
340 	/* Allocate the DMA descriptor memory. */
341 	dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
342 	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
343 	    AW_MMC_DMA_ALIGN, 0,
344 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
345 	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
346 	if (error)
347 		return (error);
348 	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
349 	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
350 	if (error)
351 		return (error);
352 
353 	error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
354 	    sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
355 	if (error)
356 		return (error);
357 	if (sc->aw_dma_map_err)
358 		return (sc->aw_dma_map_err);
359 
360 	/* Create the DMA map for data transfers. */
361 	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
362 	    AW_MMC_DMA_ALIGN, 0,
363 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
364 	    sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
365 	    sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
366 	    &sc->aw_dma_buf_tag);
367 	if (error)
368 		return (error);
369 	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
370 	    &sc->aw_dma_buf_map);
371 	if (error)
372 		return (error);
373 
374 	return (0);
375 }
376 
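/*
 * bus_dma callback for data transfers: convert the segment list into the
 * controller's chained DMA descriptors.  The first descriptor is flagged FD,
 * the last LD|ER with a null link, and intermediate descriptors are linked
 * together with their completion interrupt suppressed (DIC).
 */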
377 static void
378 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
379 {
380 	int i;
381 	struct aw_mmc_dma_desc *dma_desc;
382 	struct aw_mmc_softc *sc;
383 
384 	sc = (struct aw_mmc_softc *)arg;
385 	sc->aw_dma_map_err = err;
386 
387 	if (err)
388 		return;
389 
390 	dma_desc = sc->aw_dma_desc;
391 	for (i = 0; i < nsegs; i++) {
392 		dma_desc[i].buf_size = segs[i].ds_len;
393 		dma_desc[i].buf_addr = segs[i].ds_addr;
394 		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
395 		    AW_MMC_DMA_CONFIG_OWN;
396 		if (i == 0)
397 			dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
398 		if (i < (nsegs - 1)) {
399 			dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
400 			dma_desc[i].next = sc->aw_dma_desc_phys +
401 			    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
402 		} else {
403 			dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
404 			    AW_MMC_DMA_CONFIG_ER;
405 			dma_desc[i].next = 0;
406 		}
407 	}
408 }
409 
410 static int
411 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
412 {
413 	bus_dmasync_op_t sync_op;
414 	int error;
415 	struct mmc_command *cmd;
416 	uint32_t val;
417 
418 	cmd = sc->aw_req->cmd;
419 	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
420 		return (EFBIG);
421 	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
422 	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
423 	if (error)
424 		return (error);
425 	if (sc->aw_dma_map_err)
426 		return (sc->aw_dma_map_err);
427 
428 	if (cmd->data->flags & MMC_DATA_WRITE)
429 		sync_op = BUS_DMASYNC_PREWRITE;
430 	else
431 		sync_op = BUS_DMASYNC_PREREAD;
432 	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
433 	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
434 
435 	/* Enable DMA */
436 	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
437 	val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
438 	val |= AW_MMC_GCTL_DMA_ENB;
439 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
440 
441 	/* Reset DMA */
442 	val |= AW_MMC_GCTL_DMA_RST;
443 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
444 
445 	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
446 	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
447 	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
448 
449 	/* Enable RX or TX DMA interrupt */
450 	val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
451 	if (cmd->data->flags & MMC_DATA_WRITE)
452 		val |= AW_MMC_IDST_TX_INT;
453 	else
454 		val |= AW_MMC_IDST_RX_INT;
455 	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
456 
457 	/* Set DMA descriptor list address */
458 	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
459 
460 	/* FIFO trigger level */
461 	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
462 
463 	return (0);
464 }
465 
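/* Soft-reset the controller and poll until the reset bit self-clears. */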
466 static int
467 aw_mmc_reset(struct aw_mmc_softc *sc)
468 {
469 	uint32_t reg;
470 	int timeout;
471 
472 	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
473 	reg |= AW_MMC_GCTL_RESET;
474 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
475 	timeout = 1000;
476 	while (--timeout > 0) {
477 		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
478 			break;
479 		DELAY(100);
480 	}
481 	if (timeout == 0)
482 		return (ETIMEDOUT);
483 
484 	return (0);
485 }
486 
487 static int
488 aw_mmc_init(struct aw_mmc_softc *sc)
489 {
490 	uint32_t reg;
491 	int ret;
492 
493 	ret = aw_mmc_reset(sc);
494 	if (ret != 0)
495 		return (ret);
496 
497 	/* Set the timeout. */
498 	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
499 	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
500 	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
501 
502 	/* Mask all interrupts; they are enabled per request later. */
503 	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
504 
505 	/* Clear pending interrupts. */
506 	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
507 
508 	/* Debug register, undocumented */
509 	AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
510 
511 	/* Function select register */
512 	AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
513 
514 	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
515 
516 	/* Enable interrupts and disable AHB access. */
517 	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
518 	reg |= AW_MMC_GCTL_INT_ENB;
519 	reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
520 	reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
521 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
522 
523 	return (0);
524 }
525 
526 static void
527 aw_mmc_req_done(struct aw_mmc_softc *sc)
528 {
529 	struct mmc_command *cmd;
530 	struct mmc_request *req;
531 	uint32_t val, mask;
532 	int retry;
533 
534 	cmd = sc->aw_req->cmd;
535 	if (cmd->error != MMC_ERR_NONE) {
536 		/* Reset the FIFO and DMA engines. */
537 		mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
538 		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
539 		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
540 
541 		retry = AW_MMC_RESET_RETRY;
542 		while (--retry > 0) {
543 			val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
544 			if ((val & mask) == 0)
545 				break;
546 			DELAY(10);
547 		}
548 		if (retry == 0)
549 			device_printf(sc->aw_dev,
550 			    "timeout resetting DMA/FIFO\n");
551 		aw_mmc_update_clock(sc, 1);
552 	}
553 
554 	req = sc->aw_req;
555 	callout_stop(&sc->aw_timeoutc);
556 	sc->aw_req = NULL;
557 	sc->aw_intr = 0;
558 	sc->aw_resid = 0;
559 	sc->aw_dma_map_err = 0;
560 	sc->aw_intr_wait = 0;
561 	req->done(req);
562 }
563 
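/*
 * Successful completion: wait for the card to release busy, latch the
 * command response and verify that the whole data transfer completed.
 */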
564 static void
565 aw_mmc_req_ok(struct aw_mmc_softc *sc)
566 {
567 	int timeout;
568 	struct mmc_command *cmd;
569 	uint32_t status;
570 
571 	timeout = 1000;
572 	while (--timeout > 0) {
573 		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
574 		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
575 			break;
576 		DELAY(1000);
577 	}
578 	cmd = sc->aw_req->cmd;
579 	if (timeout == 0) {
580 		cmd->error = MMC_ERR_FAILED;
581 		aw_mmc_req_done(sc);
582 		return;
583 	}
584 	if (cmd->flags & MMC_RSP_PRESENT) {
585 		if (cmd->flags & MMC_RSP_136) {
586 			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
587 			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
588 			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
589 			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
590 		} else
591 			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
592 	}
593 	/* Check that all of the data was transferred. */
594 	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
595 		cmd->error = MMC_ERR_FAILED;
596 	aw_mmc_req_done(sc);
597 }
598 
599 static void
600 aw_mmc_timeout(void *arg)
601 {
602 	struct aw_mmc_softc *sc;
603 
604 	sc = (struct aw_mmc_softc *)arg;
605 	if (sc->aw_req != NULL) {
606 		device_printf(sc->aw_dev, "controller timeout\n");
607 		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
608 		aw_mmc_req_done(sc);
609 	} else
610 		device_printf(sc->aw_dev,
611 		    "Spurious timeout - no active request\n");
612 }
613 
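/*
 * Interrupt handler: read the raw (RISR) and internal DMA (IDST) status,
 * fail the request on any error bit, and finish it once every event the
 * request is waiting for has been observed.
 */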
614 static void
615 aw_mmc_intr(void *arg)
616 {
617 	bus_dmasync_op_t sync_op;
618 	struct aw_mmc_softc *sc;
619 	struct mmc_data *data;
620 	uint32_t idst, imask, rint;
621 
622 	sc = (struct aw_mmc_softc *)arg;
623 	AW_MMC_LOCK(sc);
624 	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
625 	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
626 	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
627 	if (idst == 0 && imask == 0 && rint == 0) {
628 		AW_MMC_UNLOCK(sc);
629 		return;
630 	}
631 #ifdef DEBUG
632 	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
633 	    idst, imask, rint);
634 #endif
635 	if (sc->aw_req == NULL) {
636 		device_printf(sc->aw_dev,
637 		    "Spurious interrupt - no active request, rint: 0x%08X\n",
638 		    rint);
639 		goto end;
640 	}
641 	if (rint & AW_MMC_INT_ERR_BIT) {
642 		if (bootverbose)
643 			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
644 		if (rint & AW_MMC_INT_RESP_TIMEOUT)
645 			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
646 		else
647 			sc->aw_req->cmd->error = MMC_ERR_FAILED;
648 		aw_mmc_req_done(sc);
649 		goto end;
650 	}
651 	if (idst & AW_MMC_IDST_ERROR) {
652 		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
653 		sc->aw_req->cmd->error = MMC_ERR_FAILED;
654 		aw_mmc_req_done(sc);
655 		goto end;
656 	}
657 
658 	sc->aw_intr |= rint;
659 	data = sc->aw_req->cmd->data;
660 	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
661 		if (data->flags & MMC_DATA_WRITE)
662 			sync_op = BUS_DMASYNC_POSTWRITE;
663 		else
664 			sync_op = BUS_DMASYNC_POSTREAD;
665 		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
666 		    sync_op);
667 		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
668 		    BUS_DMASYNC_POSTWRITE);
669 		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
670 		sc->aw_resid = data->len >> 2;
671 	}
672 	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
673 		aw_mmc_req_ok(sc);
674 
675 end:
676 	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
677 	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
678 	AW_MMC_UNLOCK(sc);
679 }
680 
681 static int
682 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
683 {
684 	int blksz;
685 	struct aw_mmc_softc *sc;
686 	struct mmc_command *cmd;
687 	uint32_t cmdreg, imask;
688 	int err;
689 
690 	sc = device_get_softc(bus);
691 	AW_MMC_LOCK(sc);
692 	if (sc->aw_req) {
693 		AW_MMC_UNLOCK(sc);
694 		return (EBUSY);
695 	}
696 
697 	sc->aw_req = req;
698 	cmd = req->cmd;
699 	cmdreg = AW_MMC_CMDR_LOAD;
700 	imask = AW_MMC_INT_ERR_BIT;
701 	sc->aw_intr_wait = 0;
702 	sc->aw_intr = 0;
703 	sc->aw_resid = 0;
704 	cmd->error = MMC_ERR_NONE;
705 
706 	if (cmd->opcode == MMC_GO_IDLE_STATE)
707 		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
708 
709 	if (cmd->flags & MMC_RSP_PRESENT)
710 		cmdreg |= AW_MMC_CMDR_RESP_RCV;
711 	if (cmd->flags & MMC_RSP_136)
712 		cmdreg |= AW_MMC_CMDR_LONG_RESP;
713 	if (cmd->flags & MMC_RSP_CRC)
714 		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
715 
716 	if (cmd->data) {
717 		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
718 
719 		if (cmd->data->flags & MMC_DATA_MULTI) {
720 			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
721 			imask |= AW_MMC_INT_AUTO_STOP_DONE;
722 			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
723 		} else {
724 			sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
725 			imask |= AW_MMC_INT_DATA_OVER;
726 		}
727 		if (cmd->data->flags & MMC_DATA_WRITE)
728 			cmdreg |= AW_MMC_CMDR_DIR_WRITE;
729 
730 		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
731 		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
732 		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
733 	} else {
734 		imask |= AW_MMC_INT_CMD_DONE;
735 	}
736 
737 	/* Enable the interrupts we are interested in */
738 	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
739 	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
740 
741 	/* Enable auto stop if needed */
742 	AW_MMC_WRITE_4(sc, AW_MMC_A12A,
743 	    cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
744 
745 	/* Write the command argument */
746 	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
747 
748 	/*
749 	 * If there is no data, just start the request.  Otherwise, set up the
750 	 * DMA transfer first and then start the request.
751 	 */
752 	if (cmd->data == NULL) {
753 		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
754 	} else {
755 		err = aw_mmc_prepare_dma(sc);
756 		if (err != 0)
757 			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
758 
759 		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
760 	}
761 
762 	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
763 	    aw_mmc_timeout, sc);
764 	AW_MMC_UNLOCK(sc);
765 
766 	return (0);
767 }
768 
769 static int
770 aw_mmc_read_ivar(device_t bus, device_t child, int which,
771     uintptr_t *result)
772 {
773 	struct aw_mmc_softc *sc;
774 
775 	sc = device_get_softc(bus);
776 	switch (which) {
777 	default:
778 		return (EINVAL);
779 	case MMCBR_IVAR_BUS_MODE:
780 		*(int *)result = sc->aw_host.ios.bus_mode;
781 		break;
782 	case MMCBR_IVAR_BUS_WIDTH:
783 		*(int *)result = sc->aw_host.ios.bus_width;
784 		break;
785 	case MMCBR_IVAR_CHIP_SELECT:
786 		*(int *)result = sc->aw_host.ios.chip_select;
787 		break;
788 	case MMCBR_IVAR_CLOCK:
789 		*(int *)result = sc->aw_host.ios.clock;
790 		break;
791 	case MMCBR_IVAR_F_MIN:
792 		*(int *)result = sc->aw_host.f_min;
793 		break;
794 	case MMCBR_IVAR_F_MAX:
795 		*(int *)result = sc->aw_host.f_max;
796 		break;
797 	case MMCBR_IVAR_HOST_OCR:
798 		*(int *)result = sc->aw_host.host_ocr;
799 		break;
800 	case MMCBR_IVAR_MODE:
801 		*(int *)result = sc->aw_host.mode;
802 		break;
803 	case MMCBR_IVAR_OCR:
804 		*(int *)result = sc->aw_host.ocr;
805 		break;
806 	case MMCBR_IVAR_POWER_MODE:
807 		*(int *)result = sc->aw_host.ios.power_mode;
808 		break;
809 	case MMCBR_IVAR_VDD:
810 		*(int *)result = sc->aw_host.ios.vdd;
811 		break;
812 	case MMCBR_IVAR_VCCQ:
813 		*(int *)result = sc->aw_host.ios.vccq;
814 		break;
815 	case MMCBR_IVAR_CAPS:
816 		*(int *)result = sc->aw_host.caps;
817 		break;
818 	case MMCBR_IVAR_TIMING:
819 		*(int *)result = sc->aw_host.ios.timing;
820 		break;
821 	case MMCBR_IVAR_MAX_DATA:
822 		*(int *)result = 65535;
823 		break;
824 	}
825 
826 	return (0);
827 }
828 
829 static int
830 aw_mmc_write_ivar(device_t bus, device_t child, int which,
831     uintptr_t value)
832 {
833 	struct aw_mmc_softc *sc;
834 
835 	sc = device_get_softc(bus);
836 	switch (which) {
837 	default:
838 		return (EINVAL);
839 	case MMCBR_IVAR_BUS_MODE:
840 		sc->aw_host.ios.bus_mode = value;
841 		break;
842 	case MMCBR_IVAR_BUS_WIDTH:
843 		sc->aw_host.ios.bus_width = value;
844 		break;
845 	case MMCBR_IVAR_CHIP_SELECT:
846 		sc->aw_host.ios.chip_select = value;
847 		break;
848 	case MMCBR_IVAR_CLOCK:
849 		sc->aw_host.ios.clock = value;
850 		break;
851 	case MMCBR_IVAR_MODE:
852 		sc->aw_host.mode = value;
853 		break;
854 	case MMCBR_IVAR_OCR:
855 		sc->aw_host.ocr = value;
856 		break;
857 	case MMCBR_IVAR_POWER_MODE:
858 		sc->aw_host.ios.power_mode = value;
859 		break;
860 	case MMCBR_IVAR_VDD:
861 		sc->aw_host.ios.vdd = value;
862 		break;
863 	case MMCBR_IVAR_VCCQ:
864 		sc->aw_host.ios.vccq = value;
865 		break;
866 	case MMCBR_IVAR_TIMING:
867 		sc->aw_host.ios.timing = value;
868 		break;
869 	/* These are read-only */
870 	case MMCBR_IVAR_CAPS:
871 	case MMCBR_IVAR_HOST_OCR:
872 	case MMCBR_IVAR_F_MIN:
873 	case MMCBR_IVAR_F_MAX:
874 	case MMCBR_IVAR_MAX_DATA:
875 		return (EINVAL);
876 	}
877 
878 	return (0);
879 }
880 
881 static int
882 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
883 {
884 	uint32_t reg;
885 	int retry;
886 
887 	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
888 	reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
889 	    AW_MMC_CKCR_CCLK_MASK_DATA0);
890 
891 	if (clkon)
892 		reg |= AW_MMC_CKCR_CCLK_ENB;
893 	if (sc->aw_mmc_conf->mask_data0)
894 		reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;
895 
896 	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
897 
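	/*
	 * Latch the new clock settings by issuing an internal "program clock"
	 * command and polling until the controller clears the LOAD bit.
	 */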
898 	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
899 	    AW_MMC_CMDR_WAIT_PRE_OVER;
900 	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
901 	retry = 0xfffff;
902 
903 	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
904 		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
905 		DELAY(10);
906 	}
907 	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
908 
909 	if (reg & AW_MMC_CMDR_LOAD) {
910 		device_printf(sc->aw_dev, "timeout updating clock\n");
911 		return (ETIMEDOUT);
912 	}
913 
914 	if (sc->aw_mmc_conf->mask_data0) {
915 		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
916 		reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
917 		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
918 	}
919 
920 	return (0);
921 }
922 
923 static void
924 aw_mmc_set_vccq(struct aw_mmc_softc *sc, int32_t vccq)
925 {
926 	int uvolt;
927 
928 	if (sc->aw_reg_vqmmc == NULL)
929 		return;
930 
931 	switch (vccq) {
932 	case vccq_180:
933 		uvolt = 1800000;
934 		break;
935 	case vccq_330:
936 		uvolt = 3300000;
937 		break;
938 	default:
939 		return;
940 	}
941 
942 	if (regulator_set_voltage(sc->aw_reg_vqmmc,
943 	    uvolt, uvolt) != 0)
944 		device_printf(sc->aw_dev,
945 		    "Cannot set vqmmc to %d<->%d\n",
946 		    uvolt,
947 		    uvolt);
948 }
949 
950 static int
951 aw_mmc_update_ios(device_t bus, device_t child)
952 {
953 	int error;
954 	struct aw_mmc_softc *sc;
955 	struct mmc_ios *ios;
956 	unsigned int clock;
957 	uint32_t reg, div = 1;
958 
959 	sc = device_get_softc(bus);
960 
961 	ios = &sc->aw_host.ios;
962 
963 	/* Set the bus width. */
964 	switch (ios->bus_width) {
965 	case bus_width_1:
966 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
967 		break;
968 	case bus_width_4:
969 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
970 		break;
971 	case bus_width_8:
972 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
973 		break;
974 	}
975 
976 	switch (ios->power_mode) {
977 	case power_on:
978 		break;
979 	case power_off:
980 		if (bootverbose)
981 			device_printf(sc->aw_dev, "Powering down sd/mmc\n");
982 
983 		if (sc->aw_reg_vmmc)
984 			regulator_disable(sc->aw_reg_vmmc);
985 		if (sc->aw_reg_vqmmc)
986 			regulator_disable(sc->aw_reg_vqmmc);
987 
988 		aw_mmc_reset(sc);
989 		break;
990 	case power_up:
991 		if (bootverbose)
992 			device_printf(sc->aw_dev, "Powering up sd/mmc\n");
993 
994 		if (sc->aw_reg_vmmc)
995 			regulator_enable(sc->aw_reg_vmmc);
996 		if (sc->aw_reg_vqmmc)
997 			regulator_enable(sc->aw_reg_vqmmc);
998 		aw_mmc_init(sc);
999 		break;
1000 	}
1001 
1002 	if (ios->vccq != sc->aw_vccq) {
1003 		aw_mmc_set_vccq(sc, ios->vccq);
1004 		sc->aw_vccq = ios->vccq;
1005 	}
1006 
1007 	/* Enable DDR mode if needed. */
1008 	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1009 	if (ios->timing == bus_timing_uhs_ddr50 ||
1010 	  ios->timing == bus_timing_mmc_ddr52)
1011 		reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1012 	else
1013 		reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1014 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
1015 
1016 	if (ios->clock && ios->clock != sc->aw_clock) {
1017 		sc->aw_clock = clock = ios->clock;
1018 
1019 		/* Disable clock */
1020 		error = aw_mmc_update_clock(sc, 0);
1021 		if (error != 0)
1022 			return (error);
1023 
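		/*
		 * In DDR52 mode the card clock is half of the controller
		 * clock, so ask the mmc module clock for twice the target
		 * frequency and program an internal divider of 2.
		 */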
1024 		if (ios->timing == bus_timing_mmc_ddr52 &&
1025 		    (sc->aw_mmc_conf->new_timing ||
1026 		    ios->bus_width == bus_width_8)) {
1027 			div = 2;
1028 			clock <<= 1;
1029 		}
1030 
1031 		/* Reset the divider. */
1032 		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1033 		reg &= ~AW_MMC_CKCR_CCLK_DIV;
1034 		reg |= div - 1;
1035 		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1036 
1037 		/* New timing mode if needed */
1038 		if (sc->aw_mmc_conf->new_timing) {
1039 			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1040 			reg |= AW_MMC_NTSR_MODE_SELECT;
1041 			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1042 		}
1043 
1044 		/* Set the MMC clock. */
1045 		error = clk_set_freq(sc->aw_clk_mmc, clock,
1046 		    CLK_SET_ROUND_DOWN);
1047 		if (error != 0) {
1048 			device_printf(sc->aw_dev,
1049 			    "failed to set frequency to %u Hz: %d\n",
1050 			    clock, error);
1051 			return (error);
1052 		}
1053 
1054 		if (sc->aw_mmc_conf->can_calibrate)
1055 			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1056 
1057 		/* Enable clock. */
1058 		error = aw_mmc_update_clock(sc, 1);
1059 		if (error != 0)
1060 			return (error);
1061 	}
1062 
1063 
1064 	return (0);
1065 }
1066 
1067 static int
1068 aw_mmc_get_ro(device_t bus, device_t child)
1069 {
1070 
1071 	return (0);
1072 }
1073 
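/*
 * Give the caller exclusive use of the controller, sleeping until any other
 * owner calls aw_mmc_release_host().
 */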
1074 static int
1075 aw_mmc_acquire_host(device_t bus, device_t child)
1076 {
1077 	struct aw_mmc_softc *sc;
1078 	int error;
1079 
1080 	sc = device_get_softc(bus);
1081 	AW_MMC_LOCK(sc);
1082 	while (sc->aw_bus_busy) {
1083 		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
1084 		if (error != 0) {
1085 			AW_MMC_UNLOCK(sc);
1086 			return (error);
1087 		}
1088 	}
1089 	sc->aw_bus_busy++;
1090 	AW_MMC_UNLOCK(sc);
1091 
1092 	return (0);
1093 }
1094 
1095 static int
1096 aw_mmc_release_host(device_t bus, device_t child)
1097 {
1098 	struct aw_mmc_softc *sc;
1099 
1100 	sc = device_get_softc(bus);
1101 	AW_MMC_LOCK(sc);
1102 	sc->aw_bus_busy--;
1103 	wakeup(sc);
1104 	AW_MMC_UNLOCK(sc);
1105 
1106 	return (0);
1107 }
1108 
1109 static device_method_t aw_mmc_methods[] = {
1110 	/* Device interface */
1111 	DEVMETHOD(device_probe,		aw_mmc_probe),
1112 	DEVMETHOD(device_attach,	aw_mmc_attach),
1113 	DEVMETHOD(device_detach,	aw_mmc_detach),
1114 
1115 	/* Bus interface */
1116 	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
1117 	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),
1118 
1119 	/* MMC bridge interface */
1120 	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
1121 	DEVMETHOD(mmcbr_request,	aw_mmc_request),
1122 	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
1123 	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
1124 	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),
1125 
1126 	DEVMETHOD_END
1127 };
1128 
1129 static devclass_t aw_mmc_devclass;
1130 
1131 static driver_t aw_mmc_driver = {
1132 	"aw_mmc",
1133 	aw_mmc_methods,
1134 	sizeof(struct aw_mmc_softc),
1135 };
1136 
1137 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
1138     NULL);
1139 MMC_DECLARE_BRIDGE(aw_mmc);
1140