/*-
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/aw_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>

#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
#define	AW_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008
#define	AW_MMC_RESET_RETRY	1000

#define	CARD_ID_FREQUENCY	400000

struct aw_mmc_conf {
	uint32_t	dma_xferlen;
	bool		mask_data0;
	bool		can_calibrate;
	bool		new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL,             0}
};

struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;
	clk_t			aw_clk_mmc;
	hwreset_t		aw_rst_ahb;
	int			aw_bus_busy;
	int			aw_resid;
	int			aw_timeout;
	struct callout		aw_timeoutc;
	struct mmc_host		aw_host;
	struct mmc_request *	aw_req;
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;
	uint32_t		aw_intr;
	uint32_t		aw_intr_wait;
	void *			aw_intrhand;
	int32_t			aw_vdd;
	regulator_t		aw_reg_vmmc;
	regulator_t		aw_reg_vqmmc;

	/* Fields required for DMA access. */
	bus_addr_t		aw_dma_desc_phys;
	bus_dmamap_t		aw_dma_map;
	bus_dma_tag_t		aw_dma_tag;
	void *			aw_dma_desc;
	bus_dmamap_t		aw_dma_buf_map;
	bus_dma_tag_t		aw_dma_buf_tag;
	int			aw_dma_map_err;
};

static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);

static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);

#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)

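/* Match the device against the "compatible" strings in compat_data. */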
static int
aw_mmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

	return (BUS_PROBE_DEFAULT);
}

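/*
 * Allocate bus resources, de-assert the reset, enable the AHB and MMC module
 * clocks, reset the controller, set up DMA and attach the mmc(4) child bus.
 */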
static int
aw_mmc_attach(device_t dev)
{
	device_t child;
	struct aw_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->aw_dev = dev;

	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	sc->aw_req = NULL;
	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
	    &sc->aw_intrhand)) {
		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
		error = hwreset_deassert(sc->aw_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	sc->aw_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->aw_timeout, 0, "Request timeout in seconds");

	/* Hardware reset */
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 1);
	DELAY(100);
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 0);
	DELAY(500);

	/* Soft Reset controller. */
	if (aw_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (aw_mmc_setup_dma(sc) != 0) {
		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
	    &sc->aw_reg_vmmc) == 0 && bootverbose)
		device_printf(dev, "vmmc-supply regulator found\n");
	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
	    &sc->aw_reg_vqmmc) == 0 && bootverbose)
		device_printf(dev, "vqmmc-supply regulator found\n");

	sc->aw_host.f_min = 400000;
	sc->aw_host.f_max = 52000000;
	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
			   MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
			   MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;

	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 /* | MMC_CAP_SIGNALING_180 */;

	if (bus_width >= 4)
		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->aw_timeoutc);
	mtx_destroy(&sc->aw_mtx);
	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

	return (ENXIO);
}

static int
aw_mmc_detach(device_t dev)
{

	return (EBUSY);
}

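/* Busdma callback: record the bus address of the loaded descriptor list. */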
static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (err) {
		sc->aw_dma_map_err = err;
		return;
	}
	sc->aw_dma_desc_phys = segs[0].ds_addr;
}

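/*
 * Create the busdma tags and maps for the DMA descriptor list and for the
 * data buffers, and load the descriptor list to obtain its bus address.
 */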
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
	if (error)
		return (error);

	error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
	    sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
	    sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}

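/*
 * Busdma callback: fill the DMA descriptor chain for the mapped data buffer,
 * one descriptor per segment, linking each entry to the next.
 */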
static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
		    AW_MMC_DMA_CONFIG_OWN;
		if (i == 0)
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
		if (i < (nsegs - 1)) {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
			dma_desc[i].next = sc->aw_dma_desc_phys +
			    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
		} else {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
			    AW_MMC_DMA_CONFIG_ER;
			dma_desc[i].next = 0;
		}
	}
}

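/*
 * Map the request's data buffer, enable and reset the internal DMA engine
 * and point it at the descriptor list.
 */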
static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->aw_req->cmd;
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
	val |= AW_MMC_CTRL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_CTRL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt */
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}

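/*
 * Soft-reset the controller, program the response/data timeouts, then clear
 * and unmask the interrupts used by the driver.
 */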
static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
	int timeout;

	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
	timeout = 1000;
	while (--timeout > 0) {
		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR,
	    AW_MMC_INT_CMD_DONE | AW_MMC_INT_ERR_BIT |
	    AW_MMC_INT_DATA_OVER | AW_MMC_INT_AUTO_STOP_DONE);
	/* Enable interrupts and AHB access. */
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
	    AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);

	return (0);
}

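/*
 * Complete the current request.  On error, reset the FIFO and DMA engines
 * and restart the card clock before handing the request back to the stack.
 */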
static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->aw_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
			if ((val & mask) == 0)
				break;
			DELAY(10);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		aw_mmc_update_clock(sc, 1);
	}

	req = sc->aw_req;
	callout_stop(&sc->aw_timeoutc);
	sc->aw_req = NULL;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
	req->done(req);
}

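/*
 * The request completed without error: wait for the card to become idle,
 * copy out the command response and check that all data was transferred.
 */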
static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->aw_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* Was all of the data transferred? */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}

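/* Callout handler: fail the active request with MMC_ERR_TIMEOUT. */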
static void
aw_mmc_timeout(void *arg)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (sc->aw_req != NULL) {
		device_printf(sc->aw_dev, "controller timeout\n");
		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		aw_mmc_req_done(sc);
	} else
		device_printf(sc->aw_dev,
		    "Spurious timeout - no active request\n");
}

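/*
 * Interrupt handler.  Handles controller and DMA status, syncs and unloads
 * the data buffer on completion and finishes the request once all of the
 * expected interrupt bits have been seen.
 */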
static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->aw_req == NULL) {
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (bootverbose)
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}
	if (idst & AW_MMC_IDST_ERROR) {
		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}

	sc->aw_intr |= rint;
	data = sc->aw_req->cmd->data;
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}

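/*
 * mmcbr_request method: build the command register value, set up the block
 * size, byte count and DMA for data transfers, then start the command.
 */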
static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg;
	int err;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->aw_req = req;
	cmd = req->cmd;
	cmdreg = AW_MMC_CMDR_LOAD;
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_intr_wait = AW_MMC_INT_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);

		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
	}

	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
	    aw_mmc_timeout, sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

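/* mmcbr instance variable accessors for the mmc(4) layer. */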
static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->aw_host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->aw_host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->aw_host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->aw_host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->aw_host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->aw_host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->aw_host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->aw_host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->aw_host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->aw_host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->aw_host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->aw_host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->aw_host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = 65535;
		break;
	}

	return (0);
}

static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->aw_host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->aw_host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->aw_host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->aw_host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->aw_host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->aw_host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->aw_host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->aw_host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->aw_host.ios.timing = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}

	return (0);
}

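/*
 * Enable or disable the card clock and latch the change with an update-clock
 * command, waiting for the controller to accept it.
 */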
static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
	    AW_MMC_CKCR_CCLK_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_CCLK_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}

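/*
 * Switch the vmmc/vqmmc regulators to the voltage range selected by the
 * requested VDD setting.
 */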
static void
aw_mmc_set_power(struct aw_mmc_softc *sc, int32_t vdd)
{
	int min_uvolt, max_uvolt;

	sc->aw_vdd = vdd;

	if (sc->aw_reg_vmmc == NULL && sc->aw_reg_vqmmc == NULL)
		return;

	switch (1 << vdd) {
	case MMC_OCR_LOW_VOLTAGE:
		min_uvolt = max_uvolt = 1800000;
		break;
	case MMC_OCR_320_330:
		min_uvolt = 3200000;
		max_uvolt = 3300000;
		break;
	case MMC_OCR_330_340:
		min_uvolt = 3300000;
		max_uvolt = 3400000;
		break;
	}

	if (sc->aw_reg_vmmc)
		if (regulator_set_voltage(sc->aw_reg_vmmc,
		    min_uvolt, max_uvolt) != 0)
			device_printf(sc->aw_dev,
			    "Cannot set vmmc to %d<->%d\n",
			    min_uvolt,
			    max_uvolt);
	if (sc->aw_reg_vqmmc)
		if (regulator_set_voltage(sc->aw_reg_vqmmc,
		    min_uvolt, max_uvolt) != 0)
			device_printf(sc->aw_dev,
			    "Cannot set vqmmc to %d<->%d\n",
			    min_uvolt,
			    max_uvolt);
}

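/*
 * mmcbr_update_ios method: apply the bus width, power state, DDR mode and
 * clock settings requested by the mmc(4) stack.
 */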
static int
aw_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	unsigned int clock;
	uint32_t reg, div = 1;

	sc = device_get_softc(bus);

	ios = &sc->aw_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
		break;
	case bus_width_4:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
		break;
	case bus_width_8:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
		break;
	}

	/* Set the voltage */
	if (ios->power_mode == power_off) {
		if (bootverbose)
			device_printf(sc->aw_dev, "Powering down sd/mmc\n");
		if (sc->aw_reg_vmmc)
			regulator_disable(sc->aw_reg_vmmc);
		if (sc->aw_reg_vqmmc)
			regulator_disable(sc->aw_reg_vqmmc);
	} else if (sc->aw_vdd != ios->vdd)
		aw_mmc_set_power(sc, ios->vdd);

	/* Enable ddr mode if needed */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52)
		reg |= AW_MMC_CTRL_DDR_MOD_SEL;
	else
		reg &= ~AW_MMC_CTRL_DDR_MOD_SEL;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	if (ios->clock) {
		clock = ios->clock;

		/* Disable clock */
		error = aw_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		if (ios->timing == bus_timing_mmc_ddr52 &&
		    (sc->aw_mmc_conf->new_timing ||
		    ios->bus_width == bus_width_8)) {
			div = 2;
			clock <<= 1;
		}

		/* Reset the divider. */
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_DIV;
		reg |= div - 1;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

		/* New timing mode if needed */
		if (sc->aw_mmc_conf->new_timing) {
			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
			reg |= AW_MMC_NTSR_MODE_SELECT;
			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
		}

		/* Set the MMC clock. */
		error = clk_set_freq(sc->aw_clk_mmc, clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->aw_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    clock, error);
			return (error);
		}

		if (sc->aw_mmc_conf->can_calibrate)
			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);

		/* Enable clock. */
		error = aw_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}

	return (0);
}

static int
aw_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}

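/* Serialize host access: sleep until the controller is free, then claim it. */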
static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_mmc_probe),
	DEVMETHOD(device_attach,	aw_mmc_attach),
	DEVMETHOD(device_detach,	aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t aw_mmc_devclass;

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(aw_mmc);