xref: /freebsd/sys/arm/allwinner/aw_mmc.c (revision c5fda9bac0325eb8c5b447717862d279006f318f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5  * Copyright (c) 2013 Alexander Fedorov
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/resource.h>
42 #include <sys/rman.h>
43 #include <sys/sysctl.h>
44 
45 #include <machine/bus.h>
46 
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49 
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 
53 #include <arm/allwinner/aw_mmc.h>
54 #include <dev/extres/clk/clk.h>
55 #include <dev/extres/hwreset/hwreset.h>
56 #include <dev/extres/regulator/regulator.h>
57 
/* Indices into aw_res[] (see aw_mmc_res_spec below). */
#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
/* The IDMA descriptor ring fills exactly one page. */
#define	AW_MMC_DMA_SEGS		(PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
#define	AW_MMC_DMA_DESC_SIZE	(sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
/* FIFO watermark value programmed into FWLR for DMA transfers. */
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008

/* Poll iterations when waiting for a controller/FIFO/DMA reset. */
#define	AW_MMC_RESET_RETRY	1000

/* Card-identification mode clock rate, Hz. */
#define	CARD_ID_FREQUENCY	400000

/*
 * Per-SoC controller configuration, selected at probe time through the
 * compat_data table below.
 */
struct aw_mmc_conf {
	uint32_t	dma_xferlen;	/* Max bytes per DMA segment/descriptor */
	bool		mask_data0;	/* Mask DATA0 while updating the card clock */
	bool		can_calibrate;	/* Has software sample-delay (SAMP_DL) control */
	bool		new_timing;	/* Select "new timing" mode via NTSR */
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

/* FDT compatible strings, each paired with its per-SoC configuration. */
static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL,             0}
};
104 
/* Per-device driver state. */
struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;	/* AHB bus clock */
	clk_t			aw_clk_mmc;	/* Module (card) clock */
	hwreset_t		aw_rst_ahb;	/* AHB reset line (optional in the FDT) */
	int			aw_bus_busy;	/* Host arbitration flag, under aw_mtx */
	int			aw_resid;	/* 32-bit words seen by the completed DMA */
	int			aw_timeout;	/* Request watchdog, seconds (sysctl) */
	struct callout		aw_timeoutc;	/* Request watchdog callout */
	struct mmc_host		aw_host;
	struct mmc_request *	aw_req;		/* In-flight request, NULL when idle */
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;	/* Per-SoC quirks, from compat_data */
	uint32_t		aw_intr;	/* RISR bits accumulated so far */
	uint32_t		aw_intr_wait;	/* RISR bits that complete the request */
	void *			aw_intrhand;
	regulator_t		aw_reg_vmmc;	/* Card power supply (optional) */
	regulator_t		aw_reg_vqmmc;	/* I/O voltage supply (optional) */
	unsigned int		aw_clock;	/* Last card clock programmed, Hz */

	/* Fields required for DMA access. */
	bus_addr_t	  	aw_dma_desc_phys;	/* Bus address of the descriptor ring */
	bus_dmamap_t		aw_dma_map;		/* Map for the descriptor ring */
	bus_dma_tag_t 		aw_dma_tag;		/* Tag for the descriptor ring */
	void * 			aw_dma_desc;		/* KVA of the descriptor ring */
	bus_dmamap_t		aw_dma_buf_map;		/* Map for data buffers */
	bus_dma_tag_t		aw_dma_buf_tag;		/* Tag for data buffers */
	int			aw_dma_map_err;		/* Sticky busdma callback error */
};

/* One memory window plus one (shareable) interrupt line. */
static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};
141 
/* Newbus device interface. */
static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
/* Internal helpers. */
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static int aw_mmc_init(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);

/* MMC bridge (mmcbr) methods. */
static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);

/* Softc lock and MMIO register accessors. */
#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
164 static int
165 aw_mmc_probe(device_t dev)
166 {
167 
168 	if (!ofw_bus_status_okay(dev))
169 		return (ENXIO);
170 	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
171 		return (ENXIO);
172 
173 	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
174 
175 	return (BUS_PROBE_DEFAULT);
176 }
177 
178 static int
179 aw_mmc_attach(device_t dev)
180 {
181 	device_t child;
182 	struct aw_mmc_softc *sc;
183 	struct sysctl_ctx_list *ctx;
184 	struct sysctl_oid_list *tree;
185 	uint32_t bus_width, max_freq;
186 	phandle_t node;
187 	int error;
188 
189 	node = ofw_bus_get_node(dev);
190 	sc = device_get_softc(dev);
191 	sc->aw_dev = dev;
192 
193 	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
194 
195 	sc->aw_req = NULL;
196 	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
197 		device_printf(dev, "cannot allocate device resources\n");
198 		return (ENXIO);
199 	}
200 	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
201 	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
202 	    &sc->aw_intrhand)) {
203 		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
204 		device_printf(dev, "cannot setup interrupt handler\n");
205 		return (ENXIO);
206 	}
207 	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
208 	    MTX_DEF);
209 	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
210 
211 	/* De-assert reset */
212 	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
213 		error = hwreset_deassert(sc->aw_rst_ahb);
214 		if (error != 0) {
215 			device_printf(dev, "cannot de-assert reset\n");
216 			goto fail;
217 		}
218 	}
219 
220 	/* Activate the module clock. */
221 	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
222 	if (error != 0) {
223 		device_printf(dev, "cannot get ahb clock\n");
224 		goto fail;
225 	}
226 	error = clk_enable(sc->aw_clk_ahb);
227 	if (error != 0) {
228 		device_printf(dev, "cannot enable ahb clock\n");
229 		goto fail;
230 	}
231 	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
232 	if (error != 0) {
233 		device_printf(dev, "cannot get mmc clock\n");
234 		goto fail;
235 	}
236 	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
237 	    CLK_SET_ROUND_DOWN);
238 	if (error != 0) {
239 		device_printf(dev, "cannot init mmc clock\n");
240 		goto fail;
241 	}
242 	error = clk_enable(sc->aw_clk_mmc);
243 	if (error != 0) {
244 		device_printf(dev, "cannot enable mmc clock\n");
245 		goto fail;
246 	}
247 
248 	sc->aw_timeout = 10;
249 	ctx = device_get_sysctl_ctx(dev);
250 	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
251 	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
252 	    &sc->aw_timeout, 0, "Request timeout in seconds");
253 
254 	/* Soft Reset controller. */
255 	if (aw_mmc_reset(sc) != 0) {
256 		device_printf(dev, "cannot reset the controller\n");
257 		goto fail;
258 	}
259 
260 	if (aw_mmc_setup_dma(sc) != 0) {
261 		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
262 		goto fail;
263 	}
264 
265 	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
266 		bus_width = 4;
267 
268 	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
269 	    &sc->aw_reg_vmmc) == 0) {
270 		if (bootverbose)
271 			device_printf(dev, "vmmc-supply regulator found\n");
272 	}
273 	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
274 	    &sc->aw_reg_vqmmc) == 0 && bootverbose) {
275 		if (bootverbose)
276 			device_printf(dev, "vqmmc-supply regulator found\n");
277 	}
278 
279 	sc->aw_host.f_min = 400000;
280 
281 	if (OF_getencprop(node, "max-frequency", &max_freq,
282 	    sizeof(uint32_t)) <= 0)
283 		max_freq = 52000000;
284 	sc->aw_host.f_max = max_freq;
285 
286 	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
287 	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
288 			   MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
289 			   MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;
290 
291 	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180;
292 
293 	if (bus_width >= 4)
294 		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
295 	if (bus_width >= 8)
296 		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;
297 
298 	child = device_add_child(dev, "mmc", -1);
299 	if (child == NULL) {
300 		device_printf(dev, "attaching MMC bus failed!\n");
301 		goto fail;
302 	}
303 	if (device_probe_and_attach(child) != 0) {
304 		device_printf(dev, "attaching MMC child failed!\n");
305 		device_delete_child(dev, child);
306 		goto fail;
307 	}
308 
309 	return (0);
310 
311 fail:
312 	callout_drain(&sc->aw_timeoutc);
313 	mtx_destroy(&sc->aw_mtx);
314 	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
315 	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
316 
317 	return (ENXIO);
318 }
319 
static int
aw_mmc_detach(device_t dev)
{

	/*
	 * Detach is not implemented: no teardown path exists for the DMA
	 * and interrupt state set up at attach, so always refuse.
	 */
	return (EBUSY);
}
326 
327 static void
328 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
329 {
330 	struct aw_mmc_softc *sc;
331 
332 	sc = (struct aw_mmc_softc *)arg;
333 	if (err) {
334 		sc->aw_dma_map_err = err;
335 		return;
336 	}
337 	sc->aw_dma_desc_phys = segs[0].ds_addr;
338 }
339 
/*
 * Create the busdma state used by the driver: a coherent, zeroed,
 * single-segment allocation for the IDMA descriptor ring, plus a
 * tag/map pair used to load data buffers for individual transfers.
 *
 * NOTE(review): on failure, tags/memory created by earlier steps are
 * not destroyed here or by the caller.
 */
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int error;

	/* Allocate the DMA descriptor memory. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->aw_dev),	/* parent */
	    AW_MMC_DMA_ALIGN, 0,		/* align, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg*/
	    AW_MMC_DMA_DESC_SIZE, 1,		/* maxsize, nsegment */
	    AW_MMC_DMA_DESC_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lock, lockarg*/
	    &sc->aw_dma_tag);
	if (error)
		return (error);

	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->aw_dma_map);
	if (error)
		return (error);

	/* aw_dma_desc_cb records the ring's bus address in the softc. */
	error = bus_dmamap_load(sc->aw_dma_tag,
	    sc->aw_dma_map,
	    sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
	    aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->aw_dev),	/* parent */
	    AW_MMC_DMA_ALIGN, 0,		/* align, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg*/
	    sc->aw_mmc_conf->dma_xferlen *
	    AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,	/* maxsize, nsegments */
	    sc->aw_mmc_conf->dma_xferlen,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lock, lockarg*/
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}
397 
/*
 * busdma callback for data-buffer loads: translate the segment list into
 * the controller's IDMA descriptor chain.  Each descriptor links to the
 * next by bus address; the last one terminates the chain.
 */
static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
			dma_desc[i].buf_size = 0;		/* Size of 0 indicate max len */
		else
			dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		/*
		 * CH: chained descriptor; OWN: owned by the DMA engine;
		 * DIC presumably suppresses the per-descriptor interrupt
		 * (it is cleared on the last descriptor below).
		 */
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
			AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;

		dma_desc[i].next = sc->aw_dma_desc_phys +
			((i + 1) * sizeof(struct aw_mmc_dma_desc));
	}

	/* Flag the first/last descriptors and terminate the chain. */
	dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
	dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
		AW_MMC_DMA_CONFIG_ER;
	dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
	dma_desc[nsegs - 1].next = 0;
}
431 
/*
 * Map the current request's data buffer, build the IDMA descriptor chain
 * (via aw_dma_cb) and program the controller's internal DMA engine.
 * Called from aw_mmc_request() with the softc lock held.
 * Returns EFBIG when the transfer exceeds the descriptor ring capacity,
 * or a busdma error.
 */
static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->aw_req->cmd;
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	/* aw_dma_cb records mapping failures here. */
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	/* The CPU just filled the descriptor ring; push it out as well. */
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
	val |= AW_MMC_GCTL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_GCTL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Soft-reset the IDMA engine, then turn it on in fixed-burst mode. */
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt */
	val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}
487 
488 static int
489 aw_mmc_reset(struct aw_mmc_softc *sc)
490 {
491 	uint32_t reg;
492 	int timeout;
493 
494 	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
495 	reg |= AW_MMC_GCTL_RESET;
496 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
497 	timeout = AW_MMC_RESET_RETRY;
498 	while (--timeout > 0) {
499 		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
500 			break;
501 		DELAY(100);
502 	}
503 	if (timeout == 0)
504 		return (ETIMEDOUT);
505 
506 	return (0);
507 }
508 
/*
 * Reset the controller and program its initial state: maximum
 * response/data timeout limits, interrupt state cleared, and the FIFO
 * switched to DMA (non-AHB) access with interrupts enabled in GCTL.
 */
static int
aw_mmc_init(struct aw_mmc_softc *sc)
{
	uint32_t reg;
	int ret;

	ret = aw_mmc_reset(sc);
	if (ret != 0)
		return (ret);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Debug register, undocumented */
	AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);

	/* Function select register */
	AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);

	/* Clear all internal-DMA status bits. */
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);

	/* Enable interrupts and disable AHB access. */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	reg |= AW_MMC_GCTL_INT_ENB;
	reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
	reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	return (0);
}
547 
/*
 * Finish the in-flight request.  On error, the FIFO and DMA engines are
 * reset and the card clock restarted before completing.  Clears all
 * per-request state and invokes the requester's completion callback.
 * Called with the softc lock held.
 */
static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->aw_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		/*
		 * Poll for the self-clearing reset bits to drop.
		 * NOTE(review): this tests AW_MMC_GCTL_RESET rather than
		 * 'mask' — presumably RESET covers the FIFO/DMA bits;
		 * confirm against aw_mmc.h.
		 */
		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
			    AW_MMC_GCTL_RESET) == 0)
				break;
			DELAY(100);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		/* Re-enable the card clock after the engine reset. */
		aw_mmc_update_clock(sc, 1);
	}

	/* Detach the request from the softc before calling back. */
	req = sc->aw_req;
	callout_stop(&sc->aw_timeoutc);
	sc->aw_req = NULL;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
	req->done(req);
}
585 
/*
 * Successful-completion path: wait for the card to leave the busy state,
 * latch the response registers into the command and finish the request.
 * Called with the softc lock held.
 */
static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	/* Poll (up to ~1s) for the card-busy indication to clear. */
	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->aw_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		/* Long responses: resp[0] is loaded from RESP3 (MSW first). */
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* All data has been transferred ? */
	/*
	 * NOTE(review): aw_resid counts 32-bit words (len >> 2), so for a
	 * length that is not a multiple of 4 this comparison always
	 * flags an error — confirm whether such lengths can occur here.
	 */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}
620 
621 static void
622 aw_mmc_timeout(void *arg)
623 {
624 	struct aw_mmc_softc *sc;
625 
626 	sc = (struct aw_mmc_softc *)arg;
627 	if (sc->aw_req != NULL) {
628 		device_printf(sc->aw_dev, "controller timeout\n");
629 		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
630 		aw_mmc_req_done(sc);
631 	} else
632 		device_printf(sc->aw_dev,
633 		    "Spurious timeout - no active request\n");
634 }
635 
/*
 * Interrupt handler.  Reads the raw interrupt status (RISR), internal
 * DMA status (IDST) and interrupt mask (IMKR), dispatches error
 * conditions, syncs/unloads DMA buffers when the transfer completes and
 * finishes the request once every bit in aw_intr_wait has been seen.
 * All observed status bits are acknowledged on exit.
 */
static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	/* Nothing pending: the (shared) interrupt was not for us. */
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->aw_req == NULL) {
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	/* Command/response errors reported by the controller. */
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (bootverbose)
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}
	/* Internal DMA engine errors. */
	if (idst & AW_MMC_IDST_ERROR) {
		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}

	/* Accumulate progress towards aw_intr_wait. */
	sc->aw_intr |= rint;
	data = sc->aw_req->cmd->data;
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		/* DMA finished: sync and unload the data buffer. */
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		/* Record the transferred length in 32-bit words. */
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	/* Acknowledge everything we observed. */
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}
702 
/*
 * mmcbr "request" method: program the controller for the command in
 * 'req' (and its data transfer, if any) and start it.  Only one request
 * may be outstanding at a time; completion is driven by aw_mmc_intr()
 * and bounded by the aw_timeout watchdog callout.
 */
static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg, imask;
	int err;

	sc = device_get_softc(bus);

	AW_MMC_LOCK(sc);
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}

	/* Reset per-request bookkeeping. */
	sc->aw_req = req;
	cmd = req->cmd;
	cmdreg = AW_MMC_CMDR_LOAD;
	imask = AW_MMC_INT_ERR_BIT;
	sc->aw_intr_wait = 0;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	cmd->error = MMC_ERR_NONE;

	/* CMD0 is prefixed with the card initialization sequence. */
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;

	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	if (cmd->data) {
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;

		/*
		 * Multi-block transfers use the controller's auto-stop
		 * and complete on AUTO_STOP_DONE; single-block transfers
		 * complete on DATA_OVER.
		 */
		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			imask |= AW_MMC_INT_AUTO_STOP_DONE;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		} else {
			sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
			imask |= AW_MMC_INT_DATA_OVER;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;

		/* Program block size and total byte count. */
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
	} else {
		imask |= AW_MMC_INT_CMD_DONE;
	}

	/* Enable the interrupts we are interested in */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Enable auto stop if needed */
	AW_MMC_WRITE_4(sc, AW_MMC_A12A,
	    cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);

	/* Write the command argument */
	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);

	/*
	 * If we don't have data start the request
	 * if we do prepare the dma request and start the request
	 */
	if (cmd->data == NULL) {
		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	} else {
		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);

		/*
		 * NOTE(review): the command is issued even when DMA setup
		 * failed above; such a request only terminates via the
		 * watchdog timeout.
		 */
		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	}

	/* Arm the request watchdog. */
	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
	    aw_mmc_timeout, sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}
791 
792 static int
793 aw_mmc_read_ivar(device_t bus, device_t child, int which,
794     uintptr_t *result)
795 {
796 	struct aw_mmc_softc *sc;
797 
798 	sc = device_get_softc(bus);
799 	switch (which) {
800 	default:
801 		return (EINVAL);
802 	case MMCBR_IVAR_BUS_MODE:
803 		*(int *)result = sc->aw_host.ios.bus_mode;
804 		break;
805 	case MMCBR_IVAR_BUS_WIDTH:
806 		*(int *)result = sc->aw_host.ios.bus_width;
807 		break;
808 	case MMCBR_IVAR_CHIP_SELECT:
809 		*(int *)result = sc->aw_host.ios.chip_select;
810 		break;
811 	case MMCBR_IVAR_CLOCK:
812 		*(int *)result = sc->aw_host.ios.clock;
813 		break;
814 	case MMCBR_IVAR_F_MIN:
815 		*(int *)result = sc->aw_host.f_min;
816 		break;
817 	case MMCBR_IVAR_F_MAX:
818 		*(int *)result = sc->aw_host.f_max;
819 		break;
820 	case MMCBR_IVAR_HOST_OCR:
821 		*(int *)result = sc->aw_host.host_ocr;
822 		break;
823 	case MMCBR_IVAR_MODE:
824 		*(int *)result = sc->aw_host.mode;
825 		break;
826 	case MMCBR_IVAR_OCR:
827 		*(int *)result = sc->aw_host.ocr;
828 		break;
829 	case MMCBR_IVAR_POWER_MODE:
830 		*(int *)result = sc->aw_host.ios.power_mode;
831 		break;
832 	case MMCBR_IVAR_VDD:
833 		*(int *)result = sc->aw_host.ios.vdd;
834 		break;
835 	case MMCBR_IVAR_VCCQ:
836 		*(int *)result = sc->aw_host.ios.vccq;
837 		break;
838 	case MMCBR_IVAR_CAPS:
839 		*(int *)result = sc->aw_host.caps;
840 		break;
841 	case MMCBR_IVAR_TIMING:
842 		*(int *)result = sc->aw_host.ios.timing;
843 		break;
844 	case MMCBR_IVAR_MAX_DATA:
845 		*(int *)result = (sc->aw_mmc_conf->dma_xferlen *
846 		    AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
847 		break;
848 	}
849 
850 	return (0);
851 }
852 
853 static int
854 aw_mmc_write_ivar(device_t bus, device_t child, int which,
855     uintptr_t value)
856 {
857 	struct aw_mmc_softc *sc;
858 
859 	sc = device_get_softc(bus);
860 	switch (which) {
861 	default:
862 		return (EINVAL);
863 	case MMCBR_IVAR_BUS_MODE:
864 		sc->aw_host.ios.bus_mode = value;
865 		break;
866 	case MMCBR_IVAR_BUS_WIDTH:
867 		sc->aw_host.ios.bus_width = value;
868 		break;
869 	case MMCBR_IVAR_CHIP_SELECT:
870 		sc->aw_host.ios.chip_select = value;
871 		break;
872 	case MMCBR_IVAR_CLOCK:
873 		sc->aw_host.ios.clock = value;
874 		break;
875 	case MMCBR_IVAR_MODE:
876 		sc->aw_host.mode = value;
877 		break;
878 	case MMCBR_IVAR_OCR:
879 		sc->aw_host.ocr = value;
880 		break;
881 	case MMCBR_IVAR_POWER_MODE:
882 		sc->aw_host.ios.power_mode = value;
883 		break;
884 	case MMCBR_IVAR_VDD:
885 		sc->aw_host.ios.vdd = value;
886 		break;
887 	case MMCBR_IVAR_VCCQ:
888 		sc->aw_host.ios.vccq = value;
889 		break;
890 	case MMCBR_IVAR_TIMING:
891 		sc->aw_host.ios.timing = value;
892 		break;
893 	/* These are read-only */
894 	case MMCBR_IVAR_CAPS:
895 	case MMCBR_IVAR_HOST_OCR:
896 	case MMCBR_IVAR_F_MIN:
897 	case MMCBR_IVAR_F_MAX:
898 	case MMCBR_IVAR_MAX_DATA:
899 		return (EINVAL);
900 	}
901 
902 	return (0);
903 }
904 
/*
 * Gate or ungate the card clock (clkon != 0 enables it) and latch the
 * new CKCR settings by issuing an internal clock-programming command
 * (CMDR with PRG_CLK) and polling until the hardware clears the LOAD
 * bit.  Controllers with the mask_data0 quirk keep DATA0 masked for the
 * duration of the update.
 */
static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
	    AW_MMC_CKCR_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	/* Tell the controller to take the new clock settings. */
	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	/* LOAD self-clears once the update has been applied. */
	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	/* Unmask DATA0 again once the update is done. */
	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}
946 
947 static int
948 aw_mmc_switch_vccq(device_t bus, device_t child)
949 {
950 	struct aw_mmc_softc *sc;
951 	int uvolt, err;
952 
953 	sc = device_get_softc(bus);
954 
955 	if (sc->aw_reg_vqmmc == NULL)
956 		return EOPNOTSUPP;
957 
958 	switch (sc->aw_host.ios.vccq) {
959 	case vccq_180:
960 		uvolt = 1800000;
961 		break;
962 	case vccq_330:
963 		uvolt = 3300000;
964 		break;
965 	default:
966 		return EINVAL;
967 	}
968 
969 	err = regulator_set_voltage(sc->aw_reg_vqmmc, uvolt, uvolt);
970 	if (err != 0) {
971 		device_printf(sc->aw_dev,
972 		    "Cannot set vqmmc to %d<->%d\n",
973 		    uvolt,
974 		    uvolt);
975 		return (err);
976 	}
977 
978 	return (0);
979 }
980 
981 static int
982 aw_mmc_update_ios(device_t bus, device_t child)
983 {
984 	int error;
985 	struct aw_mmc_softc *sc;
986 	struct mmc_ios *ios;
987 	unsigned int clock;
988 	uint32_t reg, div = 1;
989 
990 	sc = device_get_softc(bus);
991 
992 	ios = &sc->aw_host.ios;
993 
994 	/* Set the bus width. */
995 	switch (ios->bus_width) {
996 	case bus_width_1:
997 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
998 		break;
999 	case bus_width_4:
1000 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1001 		break;
1002 	case bus_width_8:
1003 		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1004 		break;
1005 	}
1006 
1007 	switch (ios->power_mode) {
1008 	case power_on:
1009 		break;
1010 	case power_off:
1011 		if (bootverbose)
1012 			device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1013 
1014 		if (sc->aw_reg_vmmc)
1015 			regulator_disable(sc->aw_reg_vmmc);
1016 		if (sc->aw_reg_vqmmc)
1017 			regulator_disable(sc->aw_reg_vqmmc);
1018 
1019 		aw_mmc_reset(sc);
1020 		break;
1021 	case power_up:
1022 		if (bootverbose)
1023 			device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1024 
1025 		if (sc->aw_reg_vmmc)
1026 			regulator_enable(sc->aw_reg_vmmc);
1027 		if (sc->aw_reg_vqmmc)
1028 			regulator_enable(sc->aw_reg_vqmmc);
1029 		aw_mmc_init(sc);
1030 		break;
1031 	};
1032 
1033 	/* Enable ddr mode if needed */
1034 	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1035 	if (ios->timing == bus_timing_uhs_ddr50 ||
1036 	  ios->timing == bus_timing_mmc_ddr52)
1037 		reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1038 	else
1039 		reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1040 	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
1041 
1042 	if (ios->clock && ios->clock != sc->aw_clock) {
1043 		sc->aw_clock = clock = ios->clock;
1044 
1045 		/* Disable clock */
1046 		error = aw_mmc_update_clock(sc, 0);
1047 		if (error != 0)
1048 			return (error);
1049 
1050 		if (ios->timing == bus_timing_mmc_ddr52 &&
1051 		    (sc->aw_mmc_conf->new_timing ||
1052 		    ios->bus_width == bus_width_8)) {
1053 			div = 2;
1054 			clock <<= 1;
1055 		}
1056 
1057 		/* Reset the divider. */
1058 		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1059 		reg &= ~AW_MMC_CKCR_DIV;
1060 		reg |= div - 1;
1061 		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1062 
1063 		/* New timing mode if needed */
1064 		if (sc->aw_mmc_conf->new_timing) {
1065 			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1066 			reg |= AW_MMC_NTSR_MODE_SELECT;
1067 			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1068 		}
1069 
1070 		/* Set the MMC clock. */
1071 		error = clk_set_freq(sc->aw_clk_mmc, clock,
1072 		    CLK_SET_ROUND_DOWN);
1073 		if (error != 0) {
1074 			device_printf(sc->aw_dev,
1075 			    "failed to set frequency to %u Hz: %d\n",
1076 			    clock, error);
1077 			return (error);
1078 		}
1079 
1080 		if (sc->aw_mmc_conf->can_calibrate)
1081 			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1082 
1083 		/* Enable clock. */
1084 		error = aw_mmc_update_clock(sc, 1);
1085 		if (error != 0)
1086 			return (error);
1087 	}
1088 
1089 
1090 	return (0);
1091 }
1092 
static int
aw_mmc_get_ro(device_t bus, device_t child)
{

	/* No write-protect detection is wired up; report writable. */
	return (0);
}
1099 
1100 static int
1101 aw_mmc_acquire_host(device_t bus, device_t child)
1102 {
1103 	struct aw_mmc_softc *sc;
1104 	int error;
1105 
1106 	sc = device_get_softc(bus);
1107 	AW_MMC_LOCK(sc);
1108 	while (sc->aw_bus_busy) {
1109 		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
1110 		if (error != 0) {
1111 			AW_MMC_UNLOCK(sc);
1112 			return (error);
1113 		}
1114 	}
1115 	sc->aw_bus_busy++;
1116 	AW_MMC_UNLOCK(sc);
1117 
1118 	return (0);
1119 }
1120 
1121 static int
1122 aw_mmc_release_host(device_t bus, device_t child)
1123 {
1124 	struct aw_mmc_softc *sc;
1125 
1126 	sc = device_get_softc(bus);
1127 	AW_MMC_LOCK(sc);
1128 	sc->aw_bus_busy--;
1129 	wakeup(sc);
1130 	AW_MMC_UNLOCK(sc);
1131 
1132 	return (0);
1133 }
1134 
/* Newbus / mmcbr method dispatch table. */
static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_mmc_probe),
	DEVMETHOD(device_attach,	aw_mmc_attach),
	DEVMETHOD(device_detach,	aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
	DEVMETHOD(mmcbr_switch_vccq,	aw_mmc_switch_vccq),
	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t aw_mmc_devclass;

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

/* Attach under simplebus (FDT) and register as an MMC bridge. */
DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(aw_mmc);
1167