/*-
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/aw_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>

#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
#define	AW_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008
#define	AW_MMC_RESET_RETRY	1000

#define	CARD_ID_FREQUENCY	400000

struct aw_mmc_conf {
	uint32_t	dma_xferlen;
	bool		mask_data0;
	bool		can_calibrate;
	bool		new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL,             0}
};

struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;
	clk_t			aw_clk_mmc;
	hwreset_t		aw_rst_ahb;
	int			aw_bus_busy;
	int			aw_resid;
	int			aw_timeout;
	struct callout		aw_timeoutc;
	struct mmc_host		aw_host;
	struct mmc_request *	aw_req;
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;
	uint32_t		aw_intr;
	uint32_t		aw_intr_wait;
	void *			aw_intrhand;
	int32_t			aw_vdd;
	regulator_t		aw_reg_vmmc;
	regulator_t		aw_reg_vqmmc;
	unsigned int		aw_clock;

	/* Fields required for DMA access. */
	bus_addr_t	  	aw_dma_desc_phys;
	bus_dmamap_t		aw_dma_map;
	bus_dma_tag_t 		aw_dma_tag;
	void * 			aw_dma_desc;
	bus_dmamap_t		aw_dma_buf_map;
	bus_dma_tag_t		aw_dma_buf_tag;
	int			aw_dma_map_err;
};

static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);

static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);

#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)

static int
aw_mmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

	return (BUS_PROBE_DEFAULT);
}

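/*
 * Attach: allocate bus resources, set up the interrupt handler, bring the
 * controller out of reset, configure its clocks and regulators, and attach
 * the mmc(4) child bus.
 */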
static int
aw_mmc_attach(device_t dev)
{
	device_t child;
	struct aw_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->aw_dev = dev;

	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	sc->aw_req = NULL;
	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
	    &sc->aw_intrhand)) {
		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
		error = hwreset_deassert(sc->aw_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	sc->aw_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->aw_timeout, 0, "Request timeout in seconds");

	/* Hardware reset */
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 1);
	DELAY(100);
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 0);
	DELAY(500);

	/* Soft Reset controller. */
	if (aw_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (aw_mmc_setup_dma(sc) != 0) {
		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
	    &sc->aw_reg_vmmc) == 0) {
		if (bootverbose)
			device_printf(dev, "vmmc-supply regulator found\n");
		regulator_enable(sc->aw_reg_vmmc);
	}
	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
	    &sc->aw_reg_vqmmc) == 0) {
		if (bootverbose)
			device_printf(dev, "vqmmc-supply regulator found\n");
		regulator_enable(sc->aw_reg_vqmmc);
	}

	sc->aw_host.f_min = 400000;
	sc->aw_host.f_max = 52000000;
	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
			   MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
			   MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;

	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 /* | MMC_CAP_SIGNALING_180 */;

	if (bus_width >= 4)
		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->aw_timeoutc);
	mtx_destroy(&sc->aw_mtx);
	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

	return (ENXIO);
}

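/* Detaching the driver is not supported. */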
static int
aw_mmc_detach(device_t dev)
{

	return (EBUSY);
}

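/* bus_dma callback: record the bus address of the DMA descriptor area. */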
static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (err) {
		sc->aw_dma_map_err = err;
		return;
	}
	sc->aw_dma_desc_phys = segs[0].ds_addr;
}

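/*
 * Allocate the DMA descriptor area and create the tag and map used for
 * data transfers.
 */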
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
	if (error)
		return (error);

	error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
	    sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
	    sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}

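/* bus_dma callback: build the chained DMA descriptor list for a transfer. */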
static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
		    AW_MMC_DMA_CONFIG_OWN;
		if (i == 0)
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
		if (i < (nsegs - 1)) {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
			dma_desc[i].next = sc->aw_dma_desc_phys +
			    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
		} else {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
			    AW_MMC_DMA_CONFIG_ER;
			dma_desc[i].next = 0;
		}
	}
}

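/* Map the request's data buffer and program the controller's internal DMA. */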
static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->aw_req->cmd;
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
	val |= AW_MMC_CTRL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_CTRL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt (do not carry GCTL bits into IDIE) */
	val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
	val &= ~(AW_MMC_IDST_RX_INT | AW_MMC_IDST_TX_INT);
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}

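/* Soft-reset the controller and program timeouts and interrupt masks. */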
static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
	int timeout;

	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
	timeout = 1000;
	while (--timeout > 0) {
		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR,
	    AW_MMC_INT_CMD_DONE | AW_MMC_INT_ERR_BIT |
	    AW_MMC_INT_DATA_OVER | AW_MMC_INT_AUTO_STOP_DONE);
	/* Enable interrupts and AHB access. */
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
	    AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);

	return (0);
}

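/*
 * Complete the current request.  On error the FIFO and DMA engines are
 * reset and the card clock is updated before the done handler is called.
 */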
static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->aw_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
			if ((val & mask) == 0)
				break;
			DELAY(10);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		aw_mmc_update_clock(sc, 1);
	}

	req = sc->aw_req;
	callout_stop(&sc->aw_timeoutc);
	sc->aw_req = NULL;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
	req->done(req);
}

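/* Wait for the card to leave the busy state, then collect the response. */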
static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->aw_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* Has all data been transferred? */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}

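/* Callout handler: fail the active request when the controller times out. */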
static void
aw_mmc_timeout(void *arg)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (sc->aw_req != NULL) {
		device_printf(sc->aw_dev, "controller timeout\n");
		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		aw_mmc_req_done(sc);
	} else
		device_printf(sc->aw_dev,
		    "Spurious timeout - no active request\n");
}

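/*
 * Interrupt handler: acknowledge controller and DMA interrupts, record
 * errors, and finish the request once all expected interrupts have arrived.
 */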
static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->aw_req == NULL) {
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (bootverbose)
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}
	if (idst & AW_MMC_IDST_ERROR) {
		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}

	sc->aw_intr |= rint;
	data = sc->aw_req->cmd->data;
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}

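/* mmcbr_request method: start a command and, if present, its DMA data phase. */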
static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg;
	int err;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->aw_req = req;
	cmd = req->cmd;
	cmdreg = AW_MMC_CMDR_LOAD;
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_intr_wait = AW_MMC_INT_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);

		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
	}

	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
	    aw_mmc_timeout, sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->aw_host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->aw_host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->aw_host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->aw_host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->aw_host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->aw_host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->aw_host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->aw_host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->aw_host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->aw_host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->aw_host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->aw_host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->aw_host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = 65535;
		break;
	}

	return (0);
}

static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->aw_host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->aw_host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->aw_host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->aw_host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->aw_host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->aw_host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->aw_host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->aw_host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->aw_host.ios.timing = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}

	return (0);
}

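/*
 * Gate or ungate the card clock by issuing a "program clock" command and
 * waiting for the controller to accept it.
 */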
static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
	    AW_MMC_CKCR_CCLK_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_CCLK_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}

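/* Adjust the vqmmc regulator for the requested VDD setting. */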
static void
aw_mmc_set_power(struct aw_mmc_softc *sc, int32_t vdd)
{
	int min_uvolt, max_uvolt;

	sc->aw_vdd = vdd;

	if (sc->aw_reg_vqmmc == NULL)
		return;

	switch (1 << vdd) {
	case MMC_OCR_LOW_VOLTAGE:
		min_uvolt = max_uvolt = 1800000;
		break;
	case MMC_OCR_320_330:
		min_uvolt = 3200000;
		max_uvolt = 3300000;
		break;
	case MMC_OCR_330_340:
		min_uvolt = 3300000;
		max_uvolt = 3400000;
		break;
	default:
		/* Unsupported VDD value; leave the regulator unchanged. */
		return;
	}

	if (regulator_set_voltage(sc->aw_reg_vqmmc,
	    min_uvolt, max_uvolt) != 0)
		device_printf(sc->aw_dev,
		    "Cannot set vqmmc to %d<->%d\n",
		    min_uvolt, max_uvolt);
}

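/*
 * mmcbr_update_ios method: apply the bus width, power, timing and clock
 * changes requested by the MMC stack.
 */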
static int
aw_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	unsigned int clock;
	uint32_t reg, div = 1;

	sc = device_get_softc(bus);

	ios = &sc->aw_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
		break;
	case bus_width_4:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
		break;
	case bus_width_8:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
		break;
	}

	/* Set the voltage */
	if (ios->power_mode == power_off) {
		if (bootverbose)
			device_printf(sc->aw_dev, "Powering down sd/mmc\n");
		if (sc->aw_reg_vmmc)
			regulator_disable(sc->aw_reg_vmmc);
		if (sc->aw_reg_vqmmc)
			regulator_disable(sc->aw_reg_vqmmc);
	} else if (sc->aw_vdd != ios->vdd)
		aw_mmc_set_power(sc, ios->vdd);

	/* Enable ddr mode if needed */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52)
		reg |= AW_MMC_CTRL_DDR_MOD_SEL;
	else
		reg &= ~AW_MMC_CTRL_DDR_MOD_SEL;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	if (ios->clock && ios->clock != sc->aw_clock) {
		sc->aw_clock = clock = ios->clock;

		/* Disable clock */
		error = aw_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		if (ios->timing == bus_timing_mmc_ddr52 &&
		    (sc->aw_mmc_conf->new_timing ||
		    ios->bus_width == bus_width_8)) {
			div = 2;
			clock <<= 1;
		}

		/* Reset the divider. */
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_DIV;
		reg |= div - 1;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

		/* New timing mode if needed */
		if (sc->aw_mmc_conf->new_timing) {
			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
			reg |= AW_MMC_NTSR_MODE_SELECT;
			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
		}

		/* Set the MMC clock. */
		error = clk_set_freq(sc->aw_clk_mmc, clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->aw_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    clock, error);
			return (error);
		}

		if (sc->aw_mmc_conf->can_calibrate)
			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);

		/* Enable clock. */
		error = aw_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}

	return (0);
}

static int
aw_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}

static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_mmc_probe),
	DEVMETHOD(device_attach,	aw_mmc_attach),
	DEVMETHOD(device_detach,	aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t aw_mmc_devclass;

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(aw_mmc);