/*-
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/aw_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>

#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
#define	AW_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008
#define	AW_MMC_RESET_RETRY	1000

#define	CARD_ID_FREQUENCY	400000

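/*
 * Per-SoC controller configuration: the maximum length of a single DMA
 * transfer, whether DATA0 must be masked while the card clock is being
 * updated, whether the sample delay register supports software
 * calibration, and whether the controller implements the new timing mode.
 */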
struct aw_mmc_conf {
	uint32_t	dma_xferlen;
	bool		mask_data0;
	bool		can_calibrate;
	bool		new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL,             0}
};

struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;
	clk_t			aw_clk_mmc;
	hwreset_t		aw_rst_ahb;
	int			aw_bus_busy;
	int			aw_resid;
	int			aw_timeout;
	struct callout		aw_timeoutc;
	struct mmc_host		aw_host;
	struct mmc_request *	aw_req;
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;
	uint32_t		aw_intr;
	uint32_t		aw_intr_wait;
	void *			aw_intrhand;
	int32_t			aw_vdd;
	regulator_t		aw_reg_vmmc;
	regulator_t		aw_reg_vqmmc;
	unsigned int		aw_clock;

	/* Fields required for DMA access. */
	bus_addr_t	  	aw_dma_desc_phys;
	bus_dmamap_t		aw_dma_map;
	bus_dma_tag_t 		aw_dma_tag;
	void * 			aw_dma_desc;
	bus_dmamap_t		aw_dma_buf_map;
	bus_dma_tag_t		aw_dma_buf_tag;
	int			aw_dma_map_err;
};

static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);

static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);

#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)

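/*
 * Match the FDT node against the compatible strings in compat_data and
 * set the device description.
 */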
static int
aw_mmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

	return (BUS_PROBE_DEFAULT);
}

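/*
 * Attach: allocate bus resources and the interrupt handler, de-assert the
 * AHB reset, enable the AHB and MMC clocks, reset the controller, set up
 * the DMA descriptor ring, read the bus-width and regulator properties
 * from the FDT node, and attach the mmc child bus.
 */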
static int
aw_mmc_attach(device_t dev)
{
	device_t child;
	struct aw_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->aw_dev = dev;

	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	sc->aw_req = NULL;
	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
	    &sc->aw_intrhand)) {
		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
		error = hwreset_deassert(sc->aw_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	sc->aw_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->aw_timeout, 0, "Request timeout in seconds");

	/* Hardware reset */
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 1);
	DELAY(100);
	AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 0);
	DELAY(500);

	/* Soft-reset the controller. */
	if (aw_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (aw_mmc_setup_dma(sc) != 0) {
		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
	    &sc->aw_reg_vmmc) == 0 && bootverbose)
		device_printf(dev, "vmmc-supply regulator found\n");
	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
	    &sc->aw_reg_vqmmc) == 0 && bootverbose)
		device_printf(dev, "vqmmc-supply regulator found\n");

	sc->aw_host.f_min = 400000;
	sc->aw_host.f_max = 52000000;
	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
			   MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
			   MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;

	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 /* | MMC_CAP_SIGNALING_180 */;

	if (bus_width >= 4)
		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->aw_timeoutc);
	mtx_destroy(&sc->aw_mtx);
	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

	return (ENXIO);
}

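/* Detach is not implemented; always report the device as busy. */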
static int
aw_mmc_detach(device_t dev)
{

	return (EBUSY);
}

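/*
 * Callback for loading the descriptor ring: record the bus address of the
 * single descriptor segment, or the load error.
 */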
static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (err) {
		sc->aw_dma_map_err = err;
		return;
	}
	sc->aw_dma_desc_phys = segs[0].ds_addr;
}

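/*
 * Allocate the DMA descriptor ring (AW_MMC_DMA_SEGS descriptors in one
 * contiguous, 32-bit addressable allocation) and create the tag and map
 * used to load data buffers for transfers.
 */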
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
	if (error)
		return (error);

	error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
	    sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
	    sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}

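/*
 * Callback for data buffer loads: translate the bus_dma segment list into
 * the controller's IDMA descriptor chain, linking each descriptor to the
 * next and flagging the first and last descriptors.
 */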
static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
		    AW_MMC_DMA_CONFIG_OWN;
		if (i == 0)
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
		if (i < (nsegs - 1)) {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
			dma_desc[i].next = sc->aw_dma_desc_phys +
			    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
		} else {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
			    AW_MMC_DMA_CONFIG_ER;
			dma_desc[i].next = 0;
		}
	}
}

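/*
 * Load the request's data buffer for DMA and sync the buffer and
 * descriptor maps, then enable and reset the DMA engine, unmask the RX or
 * TX IDMA interrupt, point the controller at the descriptor list, and
 * program the FIFO threshold.
 */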
static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->aw_req->cmd;
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
	val |= AW_MMC_CTRL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_CTRL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt */
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}

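/*
 * Soft-reset the controller, then program the data and response timeouts,
 * clear and unmask the interrupts the driver handles, and enable the
 * global interrupt bit.
 */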
static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
	int timeout;

	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
	timeout = 1000;
	while (--timeout > 0) {
		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR,
	    AW_MMC_INT_CMD_DONE | AW_MMC_INT_ERR_BIT |
	    AW_MMC_INT_DATA_OVER | AW_MMC_INT_AUTO_STOP_DONE);
	/* Enable interrupts and AHB access. */
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
	    AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);

	return (0);
}

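/*
 * Finish the current request: on error, reset the FIFO and DMA engines
 * and re-enable the card clock, then clear the per-request state and call
 * the request's completion handler.
 */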
static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->aw_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
			if ((val & mask) == 0)
				break;
			DELAY(10);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		aw_mmc_update_clock(sc, 1);
	}

	req = sc->aw_req;
	callout_stop(&sc->aw_timeoutc);
	sc->aw_req = NULL;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
	req->done(req);
}

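/*
 * Successful completion path: wait for the card to leave the busy state,
 * copy out the short or long response, and verify that all data was
 * transferred before completing the request.
 */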
static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->aw_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* Has all of the data been transferred? */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}

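/*
 * Request timeout callout: if a request is still outstanding, fail it
 * with MMC_ERR_TIMEOUT.
 */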
static void
aw_mmc_timeout(void *arg)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (sc->aw_req != NULL) {
		device_printf(sc->aw_dev, "controller timeout\n");
		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		aw_mmc_req_done(sc);
	} else
		device_printf(sc->aw_dev,
		    "Spurious timeout - no active request\n");
}

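/*
 * Interrupt handler: read and acknowledge the raw and IDMA interrupt
 * status, fail the request on error bits, sync and unload the data buffer
 * once the IDMA transfer completes, and finish the request when all
 * expected interrupt bits have been seen.
 */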
static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->aw_req == NULL) {
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (bootverbose)
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}
	if (idst & AW_MMC_IDST_ERROR) {
		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}

	sc->aw_intr |= rint;
	data = sc->aw_req->cmd->data;
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}

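/*
 * mmcbr request entry point: only one request may be outstanding. Build
 * the command register value, program the block size, byte count and DMA
 * engine for data transfers, write the argument and command registers,
 * and arm the request timeout.
 */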
static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg;
	int err;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->aw_req = req;
	cmd = req->cmd;
	cmdreg = AW_MMC_CMDR_LOAD;
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_intr_wait = AW_MMC_INT_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);

		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
	}

	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
	    aw_mmc_timeout, sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

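/*
 * Bridge ivar accessors: expose the host capabilities and current ios
 * settings to the mmc bus; the capability and frequency limits are
 * read-only.
 */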
static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->aw_host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->aw_host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->aw_host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->aw_host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->aw_host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->aw_host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->aw_host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->aw_host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->aw_host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->aw_host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->aw_host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->aw_host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->aw_host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = 65535;
		break;
	}

	return (0);
}

static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->aw_host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->aw_host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->aw_host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->aw_host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->aw_host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->aw_host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->aw_host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->aw_host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->aw_host.ios.timing = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}

	return (0);
}

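/*
 * Turn the card clock output on or off. The change is latched by issuing
 * a clock-programming command (AW_MMC_CMDR_PRG_CLK) and waiting for the
 * LOAD bit to clear; on controllers that support it, DATA0 is masked
 * while the clock is being updated.
 */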
static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
	    AW_MMC_CKCR_CCLK_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_CCLK_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}

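/*
 * Record the selected VDD and, if vmmc/vqmmc regulators are present,
 * adjust their voltage range to match the requested OCR bit.
 */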
static void
aw_mmc_set_power(struct aw_mmc_softc *sc, int32_t vdd)
{
	int min_uvolt, max_uvolt;

	sc->aw_vdd = vdd;

	if (sc->aw_reg_vmmc == NULL && sc->aw_reg_vqmmc == NULL)
		return;

	switch (1 << vdd) {
	case MMC_OCR_LOW_VOLTAGE:
		min_uvolt = max_uvolt = 1800000;
		break;
	case MMC_OCR_320_330:
		min_uvolt = 3200000;
		max_uvolt = 3300000;
		break;
	case MMC_OCR_330_340:
		min_uvolt = 3300000;
		max_uvolt = 3400000;
		break;
	}

	if (sc->aw_reg_vmmc)
		if (regulator_set_voltage(sc->aw_reg_vmmc,
		    min_uvolt, max_uvolt) != 0)
			device_printf(sc->aw_dev,
			    "Cannot set vmmc to %d<->%d\n",
			    min_uvolt,
			    max_uvolt);
	if (sc->aw_reg_vqmmc)
		if (regulator_set_voltage(sc->aw_reg_vqmmc,
		    min_uvolt, max_uvolt) != 0)
			device_printf(sc->aw_dev,
			    "Cannot set vqmmc to %d<->%d\n",
			    min_uvolt,
			    max_uvolt);
}

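/*
 * mmcbr update_ios: program the bus width, power/voltage, DDR mode and
 * the card clock. Clock changes disable the output clock, update the
 * internal divider and the mmc module clock, then re-enable the output
 * clock.
 */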
static int
aw_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	unsigned int clock;
	uint32_t reg, div = 1;

	sc = device_get_softc(bus);

	ios = &sc->aw_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
		break;
	case bus_width_4:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
		break;
	case bus_width_8:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
		break;
	}

	/* Set the voltage */
	if (ios->power_mode == power_off) {
		if (bootverbose)
			device_printf(sc->aw_dev, "Powering down sd/mmc\n");
		if (sc->aw_reg_vmmc)
			regulator_disable(sc->aw_reg_vmmc);
		if (sc->aw_reg_vqmmc)
			regulator_disable(sc->aw_reg_vqmmc);
	} else if (sc->aw_vdd != ios->vdd)
		aw_mmc_set_power(sc, ios->vdd);

	/* Enable DDR mode if needed */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	  ios->timing == bus_timing_mmc_ddr52)
		reg |= AW_MMC_CTRL_DDR_MOD_SEL;
	else
		reg &= ~AW_MMC_CTRL_DDR_MOD_SEL;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	if (ios->clock && ios->clock != sc->aw_clock) {
		sc->aw_clock = clock = ios->clock;

		/* Disable clock */
		error = aw_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		if (ios->timing == bus_timing_mmc_ddr52 &&
		    (sc->aw_mmc_conf->new_timing ||
		    ios->bus_width == bus_width_8)) {
			div = 2;
			clock <<= 1;
		}

		/* Set the internal divider. */
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_DIV;
		reg |= div - 1;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

		/* New timing mode if needed */
		if (sc->aw_mmc_conf->new_timing) {
			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
			reg |= AW_MMC_NTSR_MODE_SELECT;
			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
		}

		/* Set the MMC clock. */
		error = clk_set_freq(sc->aw_clk_mmc, clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->aw_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    clock, error);
			return (error);
		}

		if (sc->aw_mmc_conf->can_calibrate)
			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);

		/* Enable clock. */
		error = aw_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}

	return (0);
}

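/* Write-protect detection is not implemented; report the card writable. */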
static int
aw_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}

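/*
 * Serialize access to the host: sleep until the bus is free, then mark it
 * busy; aw_mmc_release_host() clears the flag and wakes up waiters.
 */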
static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_mmc_probe),
	DEVMETHOD(device_attach,	aw_mmc_attach),
	DEVMETHOD(device_detach,	aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t aw_mmc_devclass;

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(aw_mmc);