xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 77013d11e6483b970af25e13c9b892075742f7e5)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/rman.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/mmc/mmc_fdt_helpers.h>
54 
55 #include <dev/fdt/fdt_common.h>
56 #include <dev/ofw/openfirm.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 
60 #include <machine/bus.h>
61 #include <machine/cpu.h>
62 #include <machine/intr.h>
63 
64 #ifdef EXT_RESOURCES
65 #include <dev/extres/clk/clk.h>
66 #endif
67 
68 #include <dev/mmc/host/dwmmc_reg.h>
69 #include <dev/mmc/host/dwmmc_var.h>
70 
71 #include "opt_mmccam.h"
72 
73 #ifdef MMCCAM
74 #include <cam/cam.h>
75 #include <cam/cam_ccb.h>
76 #include <cam/cam_debug.h>
77 #include <cam/cam_sim.h>
78 #include <cam/cam_xpt_sim.h>
79 
80 #include "mmc_sim_if.h"
81 #endif
82 
83 #include "mmcbr_if.h"
84 
/* Debug output: compiled to a no-op unless DEBUG is defined. */
#ifdef DEBUG
#define dprintf(fmt, args...) printf(fmt, ##args)
#else
#define dprintf(x, arg...)
#endif

/*
 * Register accessors for the controller's memory-mapped register window
 * (resource index 0 in dwmmc_spec).
 */
#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)		howmany(n, d)

/* Softc mutex helpers; the lock protects all controller/register state. */
#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

/* Values for sc->flags (request state machine). */
#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

/* Interrupt status bits grouped into data-phase, command-phase and all errors. */
#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

/* Internal DMA controller (IDMAC) descriptor control bits (des0). */
#define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
#define	DES0_LD		(1 << 2)	/* Last Descriptor */
#define	DES0_FS		(1 << 3)	/* First Descriptor */
#define	DES0_CH		(1 << 4)	/* second address CHained */
#define	DES0_ER		(1 << 5)	/* End of Ring */
#define	DES0_CES	(1 << 30)	/* Card Error Summary */
#define	DES0_OWN	(1 << 31)	/* OWN */

#define	DES1_BS1_MASK	0x1fff

/* One IDMAC chained-mode descriptor; layout is fixed by the hardware. */
struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

/* The descriptor ring occupies exactly one page. */
#define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
#define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
/*
 * The size field in a DMA descriptor is 13 bits long (up to 4095 bytes),
 * but the value must be a multiple of the data bus width.  Additionally,
 * we must ensure that bus_dmamap_load() does not further fragment the
 * buffer (because it is processed with page-size granularity).  Thus
 * limit the fragment size to half of a page.
 * XXX switch descriptor format to array and use the second buffer pointer
 * for the second half of the page
 */
#define	IDMAC_MAX_SIZE	2048
148 
/* Forward declarations for local helpers. */
static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);
static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);

/* Bus resources used by this driver: one register window and one IRQ. */
static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

/* sc->hwtype encoding: low 16 bits are the hardware type, high 16 flags. */
#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)
165 
166 static void
167 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
168 {
169 
170 	if (nsegs != 1)
171 		panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
172 	if (error != 0)
173 		panic("%s: error != 0 (%d)\n", __func__, error);
174 
175 	*(bus_addr_t *)arg = segs[0].ds_addr;
176 }
177 
/*
 * bus_dmamap_load() callback for a data transfer: fill the IDMAC
 * descriptor ring from the segment list.  The first descriptor gets FS,
 * the last gets LD and has chaining/interrupt-suppression (DES0_CH,
 * DES0_DIC) cleared.  The OWN bit is set last, after a write barrier,
 * so the controller never sees a half-initialized descriptor.
 */
static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	sc = arg;
	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
	if (error != 0)
		panic("%s: error != 0 (%d)\n", __func__, error);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
		/* Publish the descriptor contents before handing ownership over. */
		wmb();
		sc->desc_ring[idx].des0 |= DES0_OWN;
	}
}
205 
206 static int
207 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
208 {
209 	int reg;
210 	int i;
211 
212 	reg = READ4(sc, SDMMC_CTRL);
213 	reg |= (reset_bits);
214 	WRITE4(sc, SDMMC_CTRL, reg);
215 
216 	/* Wait reset done */
217 	for (i = 0; i < 100; i++) {
218 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
219 			return (0);
220 		DELAY(10);
221 	}
222 
223 	device_printf(sc->dev, "Reset failed\n");
224 
225 	return (1);
226 }
227 
228 static int
229 dma_setup(struct dwmmc_softc *sc)
230 {
231 	int error;
232 	int nidx;
233 	int idx;
234 
235 	/*
236 	 * Set up TX descriptor ring, descriptors, and dma maps.
237 	 */
238 	error = bus_dma_tag_create(
239 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
240 	    4096, 0,			/* alignment, boundary */
241 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
242 	    BUS_SPACE_MAXADDR,		/* highaddr */
243 	    NULL, NULL,			/* filter, filterarg */
244 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
245 	    IDMAC_DESC_SIZE,		/* maxsegsize */
246 	    0,				/* flags */
247 	    NULL, NULL,			/* lockfunc, lockarg */
248 	    &sc->desc_tag);
249 	if (error != 0) {
250 		device_printf(sc->dev,
251 		    "could not create ring DMA tag.\n");
252 		return (1);
253 	}
254 
255 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
256 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
257 	    &sc->desc_map);
258 	if (error != 0) {
259 		device_printf(sc->dev,
260 		    "could not allocate descriptor ring.\n");
261 		return (1);
262 	}
263 
264 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
265 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
266 	    &sc->desc_ring_paddr, 0);
267 	if (error != 0) {
268 		device_printf(sc->dev,
269 		    "could not load descriptor ring map.\n");
270 		return (1);
271 	}
272 
273 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
274 		sc->desc_ring[idx].des0 = DES0_CH;
275 		sc->desc_ring[idx].des1 = 0;
276 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
277 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
278 		    (nidx * sizeof(struct idmac_desc));
279 	}
280 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
281 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
282 
283 	error = bus_dma_tag_create(
284 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
285 	    8, 0,			/* alignment, boundary */
286 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
287 	    BUS_SPACE_MAXADDR,		/* highaddr */
288 	    NULL, NULL,			/* filter, filterarg */
289 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
290 	    IDMAC_DESC_SEGS,		/* nsegments */
291 	    IDMAC_MAX_SIZE,		/* maxsegsize */
292 	    0,				/* flags */
293 	    NULL, NULL,			/* lockfunc, lockarg */
294 	    &sc->buf_tag);
295 	if (error != 0) {
296 		device_printf(sc->dev,
297 		    "could not create ring DMA tag.\n");
298 		return (1);
299 	}
300 
301 	error = bus_dmamap_create(sc->buf_tag, 0,
302 	    &sc->buf_map);
303 	if (error != 0) {
304 		device_printf(sc->dev,
305 		    "could not create TX buffer DMA map.\n");
306 		return (1);
307 	}
308 
309 	return (0);
310 }
311 
312 static void
313 dwmmc_cmd_done(struct dwmmc_softc *sc)
314 {
315 	struct mmc_command *cmd;
316 #ifdef MMCCAM
317 	union ccb *ccb;
318 #endif
319 
320 #ifdef MMCCAM
321 	ccb = sc->ccb;
322 	if (ccb == NULL)
323 		return;
324 	cmd = &ccb->mmcio.cmd;
325 #else
326 	cmd = sc->curcmd;
327 #endif
328 	if (cmd == NULL)
329 		return;
330 
331 	if (cmd->flags & MMC_RSP_PRESENT) {
332 		if (cmd->flags & MMC_RSP_136) {
333 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
334 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
335 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
336 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
337 		} else {
338 			cmd->resp[3] = 0;
339 			cmd->resp[2] = 0;
340 			cmd->resp[1] = 0;
341 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
342 		}
343 	}
344 }
345 
346 static void
347 dwmmc_tasklet(struct dwmmc_softc *sc)
348 {
349 	struct mmc_command *cmd;
350 
351 	cmd = sc->curcmd;
352 	if (cmd == NULL)
353 		return;
354 
355 	if (!sc->cmd_done)
356 		return;
357 
358 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
359 		dwmmc_next_operation(sc);
360 	} else if (cmd->data && sc->dto_rcvd) {
361 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
362 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
363 		     sc->use_auto_stop) {
364 			if (sc->acd_rcvd)
365 				dwmmc_next_operation(sc);
366 		} else {
367 			dwmmc_next_operation(sc);
368 		}
369 	}
370 }
371 
372 static void
373 dwmmc_intr(void *arg)
374 {
375 	struct mmc_command *cmd;
376 	struct dwmmc_softc *sc;
377 	uint32_t reg;
378 
379 	sc = arg;
380 
381 	DWMMC_LOCK(sc);
382 
383 	cmd = sc->curcmd;
384 
385 	/* First handle SDMMC controller interrupts */
386 	reg = READ4(sc, SDMMC_MINTSTS);
387 	if (reg) {
388 		dprintf("%s 0x%08x\n", __func__, reg);
389 
390 		if (reg & DWMMC_CMD_ERR_FLAGS) {
391 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
392 				reg, cmd->opcode);
393 			cmd->error = MMC_ERR_TIMEOUT;
394 		}
395 
396 		if (reg & DWMMC_DATA_ERR_FLAGS) {
397 			dprintf("data err 0x%08x cmd 0x%08x\n",
398 				reg, cmd->opcode);
399 			cmd->error = MMC_ERR_FAILED;
400 			if (!sc->use_pio) {
401 				dma_done(sc, cmd);
402 				dma_stop(sc);
403 			}
404 		}
405 
406 		if (reg & SDMMC_INTMASK_CMD_DONE) {
407 			dwmmc_cmd_done(sc);
408 			sc->cmd_done = 1;
409 		}
410 
411 		if (reg & SDMMC_INTMASK_ACD)
412 			sc->acd_rcvd = 1;
413 
414 		if (reg & SDMMC_INTMASK_DTO)
415 			sc->dto_rcvd = 1;
416 
417 		if (reg & SDMMC_INTMASK_CD) {
418 			dwmmc_handle_card_present(sc,
419 			    READ4(sc, SDMMC_CDETECT) == 0 ? true : false);
420 		}
421 	}
422 
423 	/* Ack interrupts */
424 	WRITE4(sc, SDMMC_RINTSTS, reg);
425 
426 	if (sc->use_pio) {
427 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
428 			pio_read(sc, cmd);
429 		}
430 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
431 			pio_write(sc, cmd);
432 		}
433 	} else {
434 		/* Now handle DMA interrupts */
435 		reg = READ4(sc, SDMMC_IDSTS);
436 		if (reg) {
437 			dprintf("dma intr 0x%08x\n", reg);
438 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
439 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
440 							 SDMMC_IDINTEN_RI));
441 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
442 				dma_done(sc, cmd);
443 			}
444 		}
445 	}
446 
447 	dwmmc_tasklet(sc);
448 
449 	DWMMC_UNLOCK(sc);
450 }
451 
452 static void
453 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
454 {
455 	bool was_present;
456 
457 	was_present = sc->child != NULL;
458 
459 	if (!was_present && is_present) {
460 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
461 		  &sc->card_delayed_task, -(hz / 2));
462 	} else if (was_present && !is_present) {
463 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
464 	}
465 }
466 
/*
 * Taskqueue handler for card insertion/removal.  With MMCCAM the CAM SIM
 * performs the rescan; otherwise attach or detach the mmc(4) child
 * depending on the current CDETECT state.  The softc lock is dropped
 * before any child device attach/detach because those operations may
 * sleep.
 */
static void
dwmmc_card_task(void *arg, int pending __unused)
{
	struct dwmmc_softc *sc = arg;

#ifdef MMCCAM
	mmc_cam_sim_discover(&sc->mmc_sim);
#else
	DWMMC_LOCK(sc);

	/* CDETECT reads 0 when a card is present; BROKEN_CD forces "present". */
	if (READ4(sc, SDMMC_CDETECT) == 0 ||
	    (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
		if (sc->child == NULL) {
			if (bootverbose)
				device_printf(sc->dev, "Card inserted\n");

			sc->child = device_add_child(sc->dev, "mmc", -1);
			/* Drop the lock: probe/attach may sleep. */
			DWMMC_UNLOCK(sc);
			if (sc->child) {
				device_set_ivars(sc->child, sc);
				(void)device_probe_and_attach(sc->child);
			}
		} else
			DWMMC_UNLOCK(sc);
	} else {
		/* Card isn't present, detach if necessary */
		if (sc->child != NULL) {
			if (bootverbose)
				device_printf(sc->dev, "Card removed\n");

			DWMMC_UNLOCK(sc);
			device_delete_child(sc->dev, sc->child);
			sc->child = NULL;
		} else
			DWMMC_UNLOCK(sc);
	}
#endif /* MMCCAM */
}
505 
506 static int
507 parse_fdt(struct dwmmc_softc *sc)
508 {
509 	pcell_t dts_value[3];
510 	phandle_t node;
511 	uint32_t bus_hz = 0;
512 	int len;
513 #ifdef EXT_RESOURCES
514 	int error;
515 #endif
516 
517 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
518 		return (ENXIO);
519 
520 	/* Set some defaults for freq and supported mode */
521 	sc->host.f_min = 400000;
522 	sc->host.f_max = 200000000;
523 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
524 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
525 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
526 
527 	/* fifo-depth */
528 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
529 		OF_getencprop(node, "fifo-depth", dts_value, len);
530 		sc->fifo_depth = dts_value[0];
531 	}
532 
533 	/* num-slots (Deprecated) */
534 	sc->num_slots = 1;
535 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
536 		device_printf(sc->dev, "num-slots property is deprecated\n");
537 		OF_getencprop(node, "num-slots", dts_value, len);
538 		sc->num_slots = dts_value[0];
539 	}
540 
541 	/* clock-frequency */
542 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
543 		OF_getencprop(node, "clock-frequency", dts_value, len);
544 		bus_hz = dts_value[0];
545 	}
546 
547 #ifdef EXT_RESOURCES
548 
549 	/* IP block reset is optional */
550 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
551 	if (error != 0 &&
552 	    error != ENOENT &&
553 	    error != ENODEV) {
554 		device_printf(sc->dev, "Cannot get reset\n");
555 		goto fail;
556 	}
557 
558 	/* vmmc regulator is optional */
559 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
560 	     &sc->vmmc);
561 	if (error != 0 &&
562 	    error != ENOENT &&
563 	    error != ENODEV) {
564 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
565 		goto fail;
566 	}
567 
568 	/* vqmmc regulator is optional */
569 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
570 	     &sc->vqmmc);
571 	if (error != 0 &&
572 	    error != ENOENT &&
573 	    error != ENODEV) {
574 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
575 		goto fail;
576 	}
577 
578 	/* Assert reset first */
579 	if (sc->hwreset != NULL) {
580 		error = hwreset_assert(sc->hwreset);
581 		if (error != 0) {
582 			device_printf(sc->dev, "Cannot assert reset\n");
583 			goto fail;
584 		}
585 	}
586 
587 	/* BIU (Bus Interface Unit clock) is optional */
588 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
589 	if (error != 0 &&
590 	    error != ENOENT &&
591 	    error != ENODEV) {
592 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
593 		goto fail;
594 	}
595 
596 	if (sc->biu) {
597 		error = clk_enable(sc->biu);
598 		if (error != 0) {
599 			device_printf(sc->dev, "cannot enable biu clock\n");
600 			goto fail;
601 		}
602 	}
603 
604 	/*
605 	 * CIU (Controller Interface Unit clock) is mandatory
606 	 * if no clock-frequency property is given
607 	 */
608 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
609 	if (error != 0 &&
610 	    error != ENOENT &&
611 	    error != ENODEV) {
612 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
613 		goto fail;
614 	}
615 
616 	if (sc->ciu) {
617 		if (bus_hz != 0) {
618 			error = clk_set_freq(sc->ciu, bus_hz, 0);
619 			if (error != 0)
620 				device_printf(sc->dev,
621 				    "cannot set ciu clock to %u\n", bus_hz);
622 		}
623 		error = clk_enable(sc->ciu);
624 		if (error != 0) {
625 			device_printf(sc->dev, "cannot enable ciu clock\n");
626 			goto fail;
627 		}
628 		clk_get_freq(sc->ciu, &sc->bus_hz);
629 	}
630 
631 	/* Enable regulators */
632 	if (sc->vmmc != NULL) {
633 		error = regulator_enable(sc->vmmc);
634 		if (error != 0) {
635 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
636 			goto fail;
637 		}
638 	}
639 	if (sc->vqmmc != NULL) {
640 		error = regulator_enable(sc->vqmmc);
641 		if (error != 0) {
642 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
643 			goto fail;
644 		}
645 	}
646 
647 	/* Take dwmmc out of reset */
648 	if (sc->hwreset != NULL) {
649 		error = hwreset_deassert(sc->hwreset);
650 		if (error != 0) {
651 			device_printf(sc->dev, "Cannot deassert reset\n");
652 			goto fail;
653 		}
654 	}
655 #endif /* EXT_RESOURCES */
656 
657 	if (sc->bus_hz == 0) {
658 		device_printf(sc->dev, "No bus speed provided\n");
659 		goto fail;
660 	}
661 
662 	return (0);
663 
664 fail:
665 	return (ENXIO);
666 }
667 
668 int
669 dwmmc_attach(device_t dev)
670 {
671 	struct dwmmc_softc *sc;
672 	int error;
673 	int slot;
674 
675 	sc = device_get_softc(dev);
676 
677 	sc->dev = dev;
678 
679 	/* Why not to use Auto Stop? It save a hundred of irq per second */
680 	sc->use_auto_stop = 1;
681 
682 	error = parse_fdt(sc);
683 	if (error != 0) {
684 		device_printf(dev, "Can't get FDT property.\n");
685 		return (ENXIO);
686 	}
687 
688 	DWMMC_LOCK_INIT(sc);
689 
690 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
691 		device_printf(dev, "could not allocate resources\n");
692 		return (ENXIO);
693 	}
694 
695 	/* Setup interrupt handler. */
696 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
697 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
698 	if (error != 0) {
699 		device_printf(dev, "could not setup interrupt handler.\n");
700 		return (ENXIO);
701 	}
702 
703 	device_printf(dev, "Hardware version ID is %04x\n",
704 		READ4(sc, SDMMC_VERID) & 0xffff);
705 
706 	/* XXX: we support operation for slot index 0 only */
707 	slot = 0;
708 	if (sc->pwren_inverted) {
709 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
710 	} else {
711 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
712 	}
713 
714 	/* Reset all */
715 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
716 				  SDMMC_CTRL_FIFO_RESET |
717 				  SDMMC_CTRL_DMA_RESET)))
718 		return (ENXIO);
719 
720 	dwmmc_setup_bus(sc, sc->host.f_min);
721 
722 	if (sc->fifo_depth == 0) {
723 		sc->fifo_depth = 1 +
724 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
725 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
726 		    sc->fifo_depth);
727 	}
728 
729 	if (!sc->use_pio) {
730 		dma_stop(sc);
731 		if (dma_setup(sc))
732 			return (ENXIO);
733 
734 		/* Install desc base */
735 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
736 
737 		/* Enable DMA interrupts */
738 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
739 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
740 					   SDMMC_IDINTEN_RI |
741 					   SDMMC_IDINTEN_TI));
742 	}
743 
744 	/* Clear and disable interrups for a while */
745 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
746 	WRITE4(sc, SDMMC_INTMASK, 0);
747 
748 	/* Maximum timeout */
749 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
750 
751 	/* Enable interrupts */
752 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
753 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
754 				   SDMMC_INTMASK_DTO |
755 				   SDMMC_INTMASK_ACD |
756 				   SDMMC_INTMASK_TXDR |
757 				   SDMMC_INTMASK_RXDR |
758 				   DWMMC_ERR_FLAGS |
759 				   SDMMC_INTMASK_CD));
760 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
761 
762 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
763 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
764 		dwmmc_card_task, sc);
765 
766 #ifdef MMCCAM
767 	sc->ccb = NULL;
768 	if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
769 		device_printf(dev, "cannot alloc cam sim\n");
770 		dwmmc_detach(dev);
771 		return (ENXIO);
772 	}
773 #endif
774 	/*
775 	 * Schedule a card detection as we won't get an interrupt
776 	 * if the card is inserted when we attach
777 	 */
778 	dwmmc_card_task(sc, 0);
779 	return (0);
780 }
781 
782 int
783 dwmmc_detach(device_t dev)
784 {
785 	struct dwmmc_softc *sc;
786 	int ret;
787 
788 	sc = device_get_softc(dev);
789 
790 	ret = device_delete_children(dev);
791 	if (ret != 0)
792 		return (ret);
793 
794 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
795 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
796 
797 	if (sc->intr_cookie != NULL) {
798 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
799 		if (ret != 0)
800 			return (ret);
801 	}
802 	bus_release_resources(dev, dwmmc_spec, sc->res);
803 
804 	DWMMC_LOCK_DESTROY(sc);
805 
806 #ifdef EXT_RESOURCES
807 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
808 		device_printf(sc->dev, "cannot deassert reset\n");
809 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
810 		device_printf(sc->dev, "cannot disable biu clock\n");
811 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
812 			device_printf(sc->dev, "cannot disable ciu clock\n");
813 
814 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
815 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
816 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
817 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
818 #endif
819 
820 #ifdef MMCCAM
821 	mmc_cam_sim_free(&sc->mmc_sim);
822 #endif
823 
824 	return (0);
825 }
826 
827 static int
828 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
829 {
830 	int tout;
831 	int div;
832 
833 	if (freq == 0) {
834 		WRITE4(sc, SDMMC_CLKENA, 0);
835 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
836 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
837 
838 		tout = 1000;
839 		do {
840 			if (tout-- < 0) {
841 				device_printf(sc->dev, "Failed update clk\n");
842 				return (1);
843 			}
844 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
845 
846 		return (0);
847 	}
848 
849 	WRITE4(sc, SDMMC_CLKENA, 0);
850 	WRITE4(sc, SDMMC_CLKSRC, 0);
851 
852 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
853 
854 	WRITE4(sc, SDMMC_CLKDIV, div);
855 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
856 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
857 
858 	tout = 1000;
859 	do {
860 		if (tout-- < 0) {
861 			device_printf(sc->dev, "Failed to update clk\n");
862 			return (1);
863 		}
864 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
865 
866 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
867 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
868 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
869 
870 	tout = 1000;
871 	do {
872 		if (tout-- < 0) {
873 			device_printf(sc->dev, "Failed to enable clk\n");
874 			return (1);
875 		}
876 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
877 
878 	return (0);
879 }
880 
881 static int
882 dwmmc_update_ios(device_t brdev, device_t reqdev)
883 {
884 	struct dwmmc_softc *sc;
885 	struct mmc_ios *ios;
886 	uint32_t reg;
887 	int ret = 0;
888 
889 	sc = device_get_softc(brdev);
890 	ios = &sc->host.ios;
891 
892 	dprintf("Setting up clk %u bus_width %d, timming: %d\n",
893 		ios->clock, ios->bus_width, ios->timing);
894 
895 	mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
896 
897 	if (ios->bus_width == bus_width_8)
898 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
899 	else if (ios->bus_width == bus_width_4)
900 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
901 	else
902 		WRITE4(sc, SDMMC_CTYPE, 0);
903 
904 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
905 		/* XXX: take care about DDR or SDR use here */
906 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
907 	}
908 
909 	/* Set DDR mode */
910 	reg = READ4(sc, SDMMC_UHS_REG);
911 	if (ios->timing == bus_timing_uhs_ddr50 ||
912 	    ios->timing == bus_timing_mmc_ddr52 ||
913 	    ios->timing == bus_timing_mmc_hs400)
914 		reg |= (SDMMC_UHS_REG_DDR);
915 	else
916 		reg &= ~(SDMMC_UHS_REG_DDR);
917 	WRITE4(sc, SDMMC_UHS_REG, reg);
918 
919 	if (sc->update_ios)
920 		ret = sc->update_ios(sc, ios);
921 
922 	dwmmc_setup_bus(sc, ios->clock);
923 
924 	return (ret);
925 }
926 
927 static int
928 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
929 {
930 	struct mmc_data *data;
931 
932 	data = cmd->data;
933 
934 	if (data->flags & MMC_DATA_WRITE)
935 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
936 			BUS_DMASYNC_POSTWRITE);
937 	else
938 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
939 			BUS_DMASYNC_POSTREAD);
940 
941 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
942 	    BUS_DMASYNC_POSTWRITE);
943 
944 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
945 
946 	return (0);
947 }
948 
949 static int
950 dma_stop(struct dwmmc_softc *sc)
951 {
952 	int reg;
953 
954 	reg = READ4(sc, SDMMC_CTRL);
955 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
956 	reg |= (SDMMC_CTRL_DMA_RESET);
957 	WRITE4(sc, SDMMC_CTRL, reg);
958 
959 	reg = READ4(sc, SDMMC_BMOD);
960 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
961 	reg |= (SDMMC_BMOD_SWR);
962 	WRITE4(sc, SDMMC_BMOD, reg);
963 
964 	return (0);
965 }
966 
/*
 * Prepare an IDMAC data transfer for 'cmd': load the data buffer into
 * the descriptor ring, program the FIFO watermarks and switch the
 * controller into DMA mode.  The register-write order (FIFOTH, CTRL,
 * BMOD, then PLDMND) follows the controller programming sequence; the
 * wmb() calls keep those writes from being reordered.  Always returns 0;
 * a bus_dmamap_load() failure is treated as fatal (panic), since the
 * buffer tag already bounds segment count and size.
 */
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	/* FIFO-ready interrupts are only needed in PIO mode; mask them. */
	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);
	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
	/* dwmmc_ring_setup() fills the descriptor ring from the segments. */
	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	/* Watermarks at half the FIFO depth, default burst size. */
	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	/* Hand the data path to the internal DMA controller. */
	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	/* Enable descriptor fetching (fixed burst). */
	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}
1018 
1019 static int
1020 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1021 {
1022 	struct mmc_data *data;
1023 	int reg;
1024 
1025 	data = cmd->data;
1026 	data->xfer_len = 0;
1027 
1028 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1029 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1030 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1031 
1032 	WRITE4(sc, SDMMC_FIFOTH, reg);
1033 	wmb();
1034 
1035 	return (0);
1036 }
1037 
1038 static void
1039 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1040 {
1041 	struct mmc_data *data;
1042 	uint32_t *p, status;
1043 
1044 	if (cmd == NULL || cmd->data == NULL)
1045 		return;
1046 
1047 	data = cmd->data;
1048 	if ((data->flags & MMC_DATA_READ) == 0)
1049 		return;
1050 
1051 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1052 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1053 
1054 	while (data->xfer_len < data->len) {
1055 		status = READ4(sc, SDMMC_STATUS);
1056 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1057 			break;
1058 		*p++ = READ4(sc, SDMMC_DATA);
1059 		data->xfer_len += 4;
1060 	}
1061 
1062 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1063 }
1064 
1065 static void
1066 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1067 {
1068 	struct mmc_data *data;
1069 	uint32_t *p, status;
1070 
1071 	if (cmd == NULL || cmd->data == NULL)
1072 		return;
1073 
1074 	data = cmd->data;
1075 	if ((data->flags & MMC_DATA_WRITE) == 0)
1076 		return;
1077 
1078 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1079 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1080 
1081 	while (data->xfer_len < data->len) {
1082 		status = READ4(sc, SDMMC_STATUS);
1083 		if (status & SDMMC_STATUS_FIFO_FULL)
1084 			break;
1085 		WRITE4(sc, SDMMC_DATA, *p++);
1086 		data->xfer_len += 4;
1087 	}
1088 
1089 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1090 }
1091 
1092 static void
1093 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1094 {
1095 	struct mmc_data *data;
1096 	uint32_t blksz;
1097 	uint32_t cmdr;
1098 
1099 	dprintf("%s\n", __func__);
1100 	sc->curcmd = cmd;
1101 	data = cmd->data;
1102 
1103 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
1104 		dwmmc_setup_bus(sc, sc->host.ios.clock);
1105 
1106 #ifndef MMCCAM
1107 	/* XXX Upper layers don't always set this */
1108 	cmd->mrq = sc->req;
1109 #endif
1110 	/* Begin setting up command register. */
1111 
1112 	cmdr = cmd->opcode;
1113 
1114 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1115 
1116 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1117 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1118 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1119 		cmdr |= SDMMC_CMD_STOP_ABORT;
1120 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1121 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1122 
1123 	/* Set up response handling. */
1124 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1125 		cmdr |= SDMMC_CMD_RESP_EXP;
1126 		if (cmd->flags & MMC_RSP_136)
1127 			cmdr |= SDMMC_CMD_RESP_LONG;
1128 	}
1129 
1130 	if (cmd->flags & MMC_RSP_CRC)
1131 		cmdr |= SDMMC_CMD_RESP_CRC;
1132 
1133 	/*
1134 	 * XXX: Not all platforms want this.
1135 	 */
1136 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1137 
1138 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1139 		sc->flags |= (CARD_INIT_DONE);
1140 		cmdr |= SDMMC_CMD_SEND_INIT;
1141 	}
1142 
1143 	if (data) {
1144 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1145 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1146 		     sc->use_auto_stop)
1147 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1148 
1149 		cmdr |= SDMMC_CMD_DATA_EXP;
1150 		if (data->flags & MMC_DATA_STREAM)
1151 			cmdr |= SDMMC_CMD_MODE_STREAM;
1152 		if (data->flags & MMC_DATA_WRITE)
1153 			cmdr |= SDMMC_CMD_DATA_WRITE;
1154 
1155 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1156 #ifdef MMCCAM
1157 		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1158 			WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1159 			WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1160 		} else
1161 #endif
1162 		{
1163 			WRITE4(sc, SDMMC_BYTCNT, data->len);
1164 			blksz = (data->len < MMC_SECTOR_SIZE) ? \
1165 				data->len : MMC_SECTOR_SIZE;
1166 			WRITE4(sc, SDMMC_BLKSIZ, blksz);
1167 		}
1168 
1169 		if (sc->use_pio) {
1170 			pio_prepare(sc, cmd);
1171 		} else {
1172 			dma_prepare(sc, cmd);
1173 		}
1174 		wmb();
1175 	}
1176 
1177 	dprintf("cmdr 0x%08x\n", cmdr);
1178 
1179 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1180 	wmb();
1181 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1182 };
1183 
/*
 * Advance the current request's state machine: start the pending
 * command if there is one, otherwise the pending stop command (when
 * auto-stop is not in use), otherwise complete the request back to
 * the upper layer (CAM ccb or mmc request, depending on build).
 */
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;
	dprintf("%s\n", __func__);
#ifdef MMCCAM
	union ccb *ccb;

	ccb = sc->ccb;
	if (ccb == NULL)
		return;
	cmd = &ccb->mmcio.cmd;
#else
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;
	cmd = req->cmd;
#endif

	/* Reset per-command completion bookkeeping. */
	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait until card is still busy.
	 * We do need this to prevent data timeouts,
	 * mostly caused by multi-block write command
	 * followed by single-read.
	 */
	/* NOTE(review): unbounded busy-wait; a card stuck busy hangs here. */
	while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		/// XXX: What to do with this?
		//dwmmc_start_cmd(sc, req->stop);
		return;
	}

	/* Nothing pending: hand the completed request back. */
#ifdef MMCCAM
	sc->ccb = NULL;
	sc->curcmd = NULL;
	ccb->ccb_h.status =
		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
	xpt_done(ccb);
#else
	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
#endif
}
1241 
1242 static int
1243 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1244 {
1245 	struct dwmmc_softc *sc;
1246 
1247 	sc = device_get_softc(brdev);
1248 
1249 	dprintf("%s\n", __func__);
1250 
1251 	DWMMC_LOCK(sc);
1252 
1253 #ifdef MMCCAM
1254 	sc->flags |= PENDING_CMD;
1255 #else
1256 	if (sc->req != NULL) {
1257 		DWMMC_UNLOCK(sc);
1258 		return (EBUSY);
1259 	}
1260 
1261 	sc->req = req;
1262 	sc->flags |= PENDING_CMD;
1263 	if (sc->req->stop)
1264 		sc->flags |= PENDING_STOP;
1265 #endif
1266 	dwmmc_next_operation(sc);
1267 
1268 	DWMMC_UNLOCK(sc);
1269 	return (0);
1270 }
1271 
1272 #ifndef MMCCAM
1273 static int
1274 dwmmc_get_ro(device_t brdev, device_t reqdev)
1275 {
1276 
1277 	dprintf("%s\n", __func__);
1278 
1279 	return (0);
1280 }
1281 
1282 static int
1283 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1284 {
1285 	struct dwmmc_softc *sc;
1286 
1287 	sc = device_get_softc(brdev);
1288 
1289 	DWMMC_LOCK(sc);
1290 	while (sc->bus_busy)
1291 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1292 	sc->bus_busy++;
1293 	DWMMC_UNLOCK(sc);
1294 	return (0);
1295 }
1296 
1297 static int
1298 dwmmc_release_host(device_t brdev, device_t reqdev)
1299 {
1300 	struct dwmmc_softc *sc;
1301 
1302 	sc = device_get_softc(brdev);
1303 
1304 	DWMMC_LOCK(sc);
1305 	sc->bus_busy--;
1306 	wakeup(sc);
1307 	DWMMC_UNLOCK(sc);
1308 	return (0);
1309 }
1310 #endif	/* !MMCCAM */
1311 
1312 static int
1313 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1314 {
1315 	struct dwmmc_softc *sc;
1316 
1317 	sc = device_get_softc(bus);
1318 
1319 	switch (which) {
1320 	default:
1321 		return (EINVAL);
1322 	case MMCBR_IVAR_BUS_MODE:
1323 		*(int *)result = sc->host.ios.bus_mode;
1324 		break;
1325 	case MMCBR_IVAR_BUS_WIDTH:
1326 		*(int *)result = sc->host.ios.bus_width;
1327 		break;
1328 	case MMCBR_IVAR_CHIP_SELECT:
1329 		*(int *)result = sc->host.ios.chip_select;
1330 		break;
1331 	case MMCBR_IVAR_CLOCK:
1332 		*(int *)result = sc->host.ios.clock;
1333 		break;
1334 	case MMCBR_IVAR_F_MIN:
1335 		*(int *)result = sc->host.f_min;
1336 		break;
1337 	case MMCBR_IVAR_F_MAX:
1338 		*(int *)result = sc->host.f_max;
1339 		break;
1340 	case MMCBR_IVAR_HOST_OCR:
1341 		*(int *)result = sc->host.host_ocr;
1342 		break;
1343 	case MMCBR_IVAR_MODE:
1344 		*(int *)result = sc->host.mode;
1345 		break;
1346 	case MMCBR_IVAR_OCR:
1347 		*(int *)result = sc->host.ocr;
1348 		break;
1349 	case MMCBR_IVAR_POWER_MODE:
1350 		*(int *)result = sc->host.ios.power_mode;
1351 		break;
1352 	case MMCBR_IVAR_VDD:
1353 		*(int *)result = sc->host.ios.vdd;
1354 		break;
1355 	case MMCBR_IVAR_VCCQ:
1356 		*(int *)result = sc->host.ios.vccq;
1357 		break;
1358 	case MMCBR_IVAR_CAPS:
1359 		*(int *)result = sc->host.caps;
1360 		break;
1361 	case MMCBR_IVAR_MAX_DATA:
1362 		/*
1363 		 * Busdma may bounce buffers, so we must reserve 2 descriptors
1364 		 * (on start and on end) for bounced fragments.
1365 		 *
1366 		 */
1367 		*(int *)result = (IDMAC_MAX_SIZE * IDMAC_DESC_SEGS) /
1368 		    MMC_SECTOR_SIZE - 3;
1369 		break;
1370 	case MMCBR_IVAR_TIMING:
1371 		*(int *)result = sc->host.ios.timing;
1372 		break;
1373 	}
1374 	return (0);
1375 }
1376 
1377 static int
1378 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1379 {
1380 	struct dwmmc_softc *sc;
1381 
1382 	sc = device_get_softc(bus);
1383 
1384 	switch (which) {
1385 	default:
1386 		return (EINVAL);
1387 	case MMCBR_IVAR_BUS_MODE:
1388 		sc->host.ios.bus_mode = value;
1389 		break;
1390 	case MMCBR_IVAR_BUS_WIDTH:
1391 		sc->host.ios.bus_width = value;
1392 		break;
1393 	case MMCBR_IVAR_CHIP_SELECT:
1394 		sc->host.ios.chip_select = value;
1395 		break;
1396 	case MMCBR_IVAR_CLOCK:
1397 		sc->host.ios.clock = value;
1398 		break;
1399 	case MMCBR_IVAR_MODE:
1400 		sc->host.mode = value;
1401 		break;
1402 	case MMCBR_IVAR_OCR:
1403 		sc->host.ocr = value;
1404 		break;
1405 	case MMCBR_IVAR_POWER_MODE:
1406 		sc->host.ios.power_mode = value;
1407 		break;
1408 	case MMCBR_IVAR_VDD:
1409 		sc->host.ios.vdd = value;
1410 		break;
1411 	case MMCBR_IVAR_TIMING:
1412 		sc->host.ios.timing = value;
1413 		break;
1414 	case MMCBR_IVAR_VCCQ:
1415 		sc->host.ios.vccq = value;
1416 		break;
1417 	/* These are read-only */
1418 	case MMCBR_IVAR_CAPS:
1419 	case MMCBR_IVAR_HOST_OCR:
1420 	case MMCBR_IVAR_F_MIN:
1421 	case MMCBR_IVAR_F_MAX:
1422 	case MMCBR_IVAR_MAX_DATA:
1423 		return (EINVAL);
1424 	}
1425 	return (0);
1426 }
1427 
1428 #ifdef MMCCAM
1429 /* Note: this function likely belongs to the specific driver impl */
1430 static int
1431 dwmmc_switch_vccq(device_t dev, device_t child)
1432 {
1433 	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
1434 	return EINVAL;
1435 }
1436 
1437 static int
1438 dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1439 {
1440 	struct dwmmc_softc *sc;
1441 
1442 	sc = device_get_softc(dev);
1443 
1444 	cts->host_ocr = sc->host.host_ocr;
1445 	cts->host_f_min = sc->host.f_min;
1446 	cts->host_f_max = sc->host.f_max;
1447 	cts->host_caps = sc->host.caps;
1448 	cts->host_max_data = (IDMAC_MAX_SIZE * IDMAC_DESC_SEGS) / MMC_SECTOR_SIZE;
1449 	memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1450 
1451 	return (0);
1452 }
1453 
/*
 * MMCCAM set_tran_settings: copy each field flagged in cts->ios_valid
 * into the cached host ios state, then push the whole configuration
 * to the hardware via dwmmc_update_ios().
 */
static int
dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;
	struct mmc_ios *new_ios;
	int res;

	sc = device_get_softc(dev);
	ios = &sc->host.ios;

	new_ios = &cts->ios;

	/* Update only requested fields */
	if (cts->ios_valid & MMC_CLK) {
		ios->clock = new_ios->clock;
		if (bootverbose)
			device_printf(sc->dev, "Clock => %d\n", ios->clock);
	}
	if (cts->ios_valid & MMC_VDD) {
		ios->vdd = new_ios->vdd;
		if (bootverbose)
			device_printf(sc->dev, "VDD => %d\n", ios->vdd);
	}
	if (cts->ios_valid & MMC_CS) {
		ios->chip_select = new_ios->chip_select;
		if (bootverbose)
			device_printf(sc->dev, "CS => %d\n", ios->chip_select);
	}
	if (cts->ios_valid & MMC_BW) {
		ios->bus_width = new_ios->bus_width;
		if (bootverbose)
			device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
	}
	if (cts->ios_valid & MMC_PM) {
		ios->power_mode = new_ios->power_mode;
		if (bootverbose)
			device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
	}
	if (cts->ios_valid & MMC_BT) {
		ios->timing = new_ios->timing;
		if (bootverbose)
			device_printf(sc->dev, "Timing => %d\n", ios->timing);
	}
	if (cts->ios_valid & MMC_BM) {
		ios->bus_mode = new_ios->bus_mode;
		if (bootverbose)
			device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
	}
	if (cts->ios_valid & MMC_VCCQ) {
		ios->vccq = new_ios->vccq;
		if (bootverbose)
			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
		/* NOTE(review): a vccq switch failure is logged, not returned. */
		res = dwmmc_switch_vccq(sc->dev, NULL);
		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
	}

	return (dwmmc_update_ios(sc->dev, NULL));
}
1513 
1514 static int
1515 dwmmc_cam_request(device_t dev, union ccb *ccb)
1516 {
1517 	struct dwmmc_softc *sc;
1518 	struct ccb_mmcio *mmcio;
1519 
1520 	sc = device_get_softc(dev);
1521 	mmcio = &ccb->mmcio;
1522 
1523 	DWMMC_LOCK(sc);
1524 
1525 #ifdef DEBUG
1526 	if (__predict_false(bootverbose)) {
1527 		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1528 			    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1529 			    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1530 			    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1531 	}
1532 #endif
1533 	if (mmcio->cmd.data != NULL) {
1534 		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1535 			panic("data->len = %d, data->flags = %d -- something is b0rked",
1536 			      (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1537 	}
1538 	if (sc->ccb != NULL) {
1539 		device_printf(sc->dev, "Controller still has an active command\n");
1540 		return (EBUSY);
1541 	}
1542 	sc->ccb = ccb;
1543 	DWMMC_UNLOCK(sc);
1544 	dwmmc_request(sc->dev, NULL, NULL);
1545 
1546 	return (0);
1547 }
1548 #endif /* MMCCAM */
1549 
/*
 * Method table for the dwmmc driver class.  The bus ivar accessors are
 * always present; the bridge interface differs by build: the stock
 * mmc(4) stack uses mmcbr_if, the MMCCAM stack uses mmc_sim methods.
 */
static device_method_t dwmmc_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

#ifndef MMCCAM
	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
#endif

#ifdef MMCCAM
	/* MMCCAM interface */
	DEVMETHOD(mmc_sim_get_tran_settings,	dwmmc_get_tran_settings),
	DEVMETHOD(mmc_sim_set_tran_settings,	dwmmc_set_tran_settings),
	DEVMETHOD(mmc_sim_cam_request,		dwmmc_cam_request),

	DEVMETHOD(bus_add_child,		bus_generic_add_child),
#endif

	DEVMETHOD_END
};
1575 
/* Declare the dwmmc driver class with its method table and softc size. */
DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
    sizeof(struct dwmmc_softc));
1578