1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/conf.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/module.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/rman.h>
50 #include <sys/queue.h>
51 #include <sys/taskqueue.h>
52 
53 #include <dev/mmc/bridge.h>
54 #include <dev/mmc/mmcbrvar.h>
55 #include <dev/mmc/mmc_fdt_helpers.h>
56 
57 #include <dev/fdt/fdt_common.h>
58 #include <dev/ofw/openfirm.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 
62 #include <machine/bus.h>
63 #include <machine/cpu.h>
64 #include <machine/intr.h>
65 
66 #ifdef EXT_RESOURCES
67 #include <dev/extres/clk/clk.h>
68 #endif
69 
70 #include <dev/mmc/host/dwmmc_reg.h>
71 #include <dev/mmc/host/dwmmc_var.h>
72 
73 #include "opt_mmccam.h"
74 
75 #ifdef MMCCAM
76 #include <cam/cam.h>
77 #include <cam/cam_ccb.h>
78 #include <cam/cam_debug.h>
79 #include <cam/cam_sim.h>
80 #include <cam/cam_xpt_sim.h>
81 
82 #include "mmc_sim_if.h"
83 #endif
84 
85 #include "mmcbr_if.h"
86 
87 #ifdef DEBUG
88 #define dprintf(fmt, args...) printf(fmt, ##args)
89 #else
90 #define dprintf(x, arg...)
91 #endif
92 
93 #define	READ4(_sc, _reg) \
94 	bus_read_4((_sc)->res[0], _reg)
95 #define	WRITE4(_sc, _reg, _val) \
96 	bus_write_4((_sc)->res[0], _reg, _val)
97 
98 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
99 
100 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
101 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
102 #define	DWMMC_LOCK_INIT(_sc) \
103 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
104 	    "dwmmc", MTX_DEF)
105 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
106 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
107 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
108 
109 #define	PENDING_CMD	0x01
110 #define	PENDING_STOP	0x02
111 #define	CARD_INIT_DONE	0x04
112 
113 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
114 				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
115 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
116 				|SDMMC_INTMASK_RE)
117 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
118 				|SDMMC_INTMASK_HLE)
119 
120 #define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
121 #define	DES0_LD		(1 << 2)	/* Last Descriptor */
122 #define	DES0_FS		(1 << 3)	/* First Descriptor */
123 #define	DES0_CH		(1 << 4)	/* second address CHained */
124 #define	DES0_ER		(1 << 5)	/* End of Ring */
125 #define	DES0_CES	(1 << 30)	/* Card Error Summary */
126 #define	DES0_OWN	(1 << 31)	/* OWN */
127 
128 #define	DES1_BS1_MASK	0x1fff
129 
130 struct idmac_desc {
131 	uint32_t	des0;	/* control */
132 	uint32_t	des1;	/* bufsize */
133 	uint32_t	des2;	/* buf1 phys addr */
134 	uint32_t	des3;	/* buf2 phys addr or next descr */
135 };
136 
137 #define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
138 #define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
139 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
140 /*
141  * The size field in a DMA descriptor is 13 bits wide (up to 4095 bytes),
142  * but the value must be a multiple of the data bus width. Additionally, we
143  * must ensure that bus_dmamap_load() does not further fragment the buffer
144  * (it is processed with page-size granularity), so limit the fragment size
145  * to half of a page.
146  * XXX switch the descriptor format to an array and use the second buffer
147  * pointer for the second half of the page.
148  */
149 #define	IDMAC_MAX_SIZE	2048
150 /*
151  * Busdma may bounce buffers, so we must reserve two descriptors
152  * (one at the start and one at the end) for bounced fragments.
153  */
154 #define DWMMC_MAX_DATA	((IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE)
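/*
 * For illustration only (assuming a 4 KiB PAGE_SIZE): each struct idmac_desc
 * is 16 bytes, so IDMAC_DESC_SEGS is 4096 / 16 = 256 descriptors and
 * DWMMC_MAX_DATA works out to 2048 * (256 - 2) / 512 = 1016 sectors per
 * request.
 */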
155 
156 static void dwmmc_next_operation(struct dwmmc_softc *);
157 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
158 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
159 static int dma_stop(struct dwmmc_softc *);
160 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
161 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
162 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
163 
164 static struct resource_spec dwmmc_spec[] = {
165 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
166 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
167 	{ -1, 0 }
168 };
169 
170 #define	HWTYPE_MASK		(0x0000ffff)
171 #define	HWFLAG_MASK		(0xffff << 16)
172 
173 static void
174 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
175 {
176 
177 	if (nsegs != 1)
178 		panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
179 	if (error != 0)
180 		panic("%s: error != 0 (%d)\n", __func__, error);
181 
182 	*(bus_addr_t *)arg = segs[0].ds_addr;
183 }
184 
185 static void
186 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
187 {
188 	struct dwmmc_softc *sc;
189 	int idx;
190 
191 	sc = arg;
192 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
193 	if (error != 0)
194 		panic("%s: error != 0 (%d)\n", __func__, error);
195 
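	/*
	 * Sketch of what this loop produces for a hypothetical 3-segment
	 * load: desc[0] gets FS (first segment), desc[2] gets LD (last) and
	 * drops DIC/CH, and each descriptor is handed to the controller by
	 * setting OWN only after its other fields are written.
	 */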
196 	for (idx = 0; idx < nsegs; idx++) {
197 		sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
198 		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
199 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
200 
201 		if (idx == 0)
202 			sc->desc_ring[idx].des0 |= DES0_FS;
203 
204 		if (idx == (nsegs - 1)) {
205 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
206 			sc->desc_ring[idx].des0 |= DES0_LD;
207 		}
208 		wmb();
209 		sc->desc_ring[idx].des0 |= DES0_OWN;
210 	}
211 }
212 
213 static int
214 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
215 {
216 	int reg;
217 	int i;
218 
219 	reg = READ4(sc, SDMMC_CTRL);
220 	reg |= (reset_bits);
221 	WRITE4(sc, SDMMC_CTRL, reg);
222 
223 	/* Wait for the reset to complete */
224 	for (i = 0; i < 100; i++) {
225 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
226 			return (0);
227 		DELAY(10);
228 	}
229 
230 	device_printf(sc->dev, "Reset failed\n");
231 
232 	return (1);
233 }
234 
235 static int
236 dma_setup(struct dwmmc_softc *sc)
237 {
238 	int error;
239 	int nidx;
240 	int idx;
241 
242 	/*
243 	 * Set up the descriptor ring, descriptors, and DMA maps.
244 	 */
245 	error = bus_dma_tag_create(
246 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
247 	    4096, 0,			/* alignment, boundary */
248 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
249 	    BUS_SPACE_MAXADDR,		/* highaddr */
250 	    NULL, NULL,			/* filter, filterarg */
251 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
252 	    IDMAC_DESC_SIZE,		/* maxsegsize */
253 	    0,				/* flags */
254 	    NULL, NULL,			/* lockfunc, lockarg */
255 	    &sc->desc_tag);
256 	if (error != 0) {
257 		device_printf(sc->dev,
258 		    "could not create ring DMA tag.\n");
259 		return (1);
260 	}
261 
262 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
263 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
264 	    &sc->desc_map);
265 	if (error != 0) {
266 		device_printf(sc->dev,
267 		    "could not allocate descriptor ring.\n");
268 		return (1);
269 	}
270 
271 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
272 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
273 	    &sc->desc_ring_paddr, 0);
274 	if (error != 0) {
275 		device_printf(sc->dev,
276 		    "could not load descriptor ring map.\n");
277 		return (1);
278 	}
279 
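	/*
	 * Pre-link the descriptors into a circular chain: each des3 holds the
	 * physical address of the next descriptor (for illustration, with a
	 * 16-byte descriptor: desc[0].des3 = paddr + 16, desc[1].des3 =
	 * paddr + 32, ...), and the final descriptor wraps back to paddr and
	 * carries DES0_ER.
	 */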
280 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
281 		sc->desc_ring[idx].des0 = DES0_CH;
282 		sc->desc_ring[idx].des1 = 0;
283 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
284 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
285 		    (nidx * sizeof(struct idmac_desc));
286 	}
287 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
288 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
289 
290 	error = bus_dma_tag_create(
291 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
292 	    8, 0,			/* alignment, boundary */
293 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
294 	    BUS_SPACE_MAXADDR,		/* highaddr */
295 	    NULL, NULL,			/* filter, filterarg */
296 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
297 	    IDMAC_DESC_SEGS,		/* nsegments */
298 	    IDMAC_MAX_SIZE,		/* maxsegsize */
299 	    0,				/* flags */
300 	    NULL, NULL,			/* lockfunc, lockarg */
301 	    &sc->buf_tag);
302 	if (error != 0) {
303 		device_printf(sc->dev,
304 		    "could not create buffer DMA tag.\n");
305 		return (1);
306 	}
307 
308 	error = bus_dmamap_create(sc->buf_tag, 0,
309 	    &sc->buf_map);
310 	if (error != 0) {
311 		device_printf(sc->dev,
312 		    "could not create TX buffer DMA map.\n");
313 		return (1);
314 	}
315 
316 	return (0);
317 }
318 
319 static void
320 dwmmc_cmd_done(struct dwmmc_softc *sc)
321 {
322 	struct mmc_command *cmd;
323 #ifdef MMCCAM
324 	union ccb *ccb;
325 #endif
326 
327 #ifdef MMCCAM
328 	ccb = sc->ccb;
329 	if (ccb == NULL)
330 		return;
331 	cmd = &ccb->mmcio.cmd;
332 #else
333 	cmd = sc->curcmd;
334 #endif
335 	if (cmd == NULL)
336 		return;
337 
338 	if (cmd->flags & MMC_RSP_PRESENT) {
339 		if (cmd->flags & MMC_RSP_136) {
340 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
341 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
342 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
343 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
344 		} else {
345 			cmd->resp[3] = 0;
346 			cmd->resp[2] = 0;
347 			cmd->resp[1] = 0;
348 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
349 		}
350 	}
351 }
352 
353 static void
354 dwmmc_tasklet(struct dwmmc_softc *sc)
355 {
356 	struct mmc_command *cmd;
357 
358 	cmd = sc->curcmd;
359 	if (cmd == NULL)
360 		return;
361 
362 	if (!sc->cmd_done)
363 		return;
364 
365 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
366 		dwmmc_next_operation(sc);
367 	} else if (cmd->data && sc->dto_rcvd) {
368 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
369 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
370 		     sc->use_auto_stop) {
371 			if (sc->acd_rcvd)
372 				dwmmc_next_operation(sc);
373 		} else {
374 			dwmmc_next_operation(sc);
375 		}
376 	}
377 }
378 
379 static void
380 dwmmc_intr(void *arg)
381 {
382 	struct mmc_command *cmd;
383 	struct dwmmc_softc *sc;
384 	uint32_t reg;
385 
386 	sc = arg;
387 
388 	DWMMC_LOCK(sc);
389 
390 	cmd = sc->curcmd;
391 
392 	/* First handle SDMMC controller interrupts */
393 	reg = READ4(sc, SDMMC_MINTSTS);
394 	if (reg) {
395 		dprintf("%s 0x%08x\n", __func__, reg);
396 
397 		if (reg & DWMMC_CMD_ERR_FLAGS) {
398 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
399 				reg, cmd->opcode);
400 			cmd->error = MMC_ERR_TIMEOUT;
401 		}
402 
403 		if (reg & DWMMC_DATA_ERR_FLAGS) {
404 			dprintf("data err 0x%08x cmd 0x%08x\n",
405 				reg, cmd->opcode);
406 			cmd->error = MMC_ERR_FAILED;
407 			if (!sc->use_pio) {
408 				dma_done(sc, cmd);
409 				dma_stop(sc);
410 			}
411 		}
412 
413 		if (reg & SDMMC_INTMASK_CMD_DONE) {
414 			dwmmc_cmd_done(sc);
415 			sc->cmd_done = 1;
416 		}
417 
418 		if (reg & SDMMC_INTMASK_ACD)
419 			sc->acd_rcvd = 1;
420 
421 		if (reg & SDMMC_INTMASK_DTO)
422 			sc->dto_rcvd = 1;
423 
424 		if (reg & SDMMC_INTMASK_CD) {
425 			dwmmc_handle_card_present(sc,
426 			    READ4(sc, SDMMC_CDETECT) == 0);
427 		}
428 	}
429 
430 	/* Ack interrupts */
431 	WRITE4(sc, SDMMC_RINTSTS, reg);
432 
433 	if (sc->use_pio) {
434 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
435 			pio_read(sc, cmd);
436 		}
437 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
438 			pio_write(sc, cmd);
439 		}
440 	} else {
441 		/* Now handle DMA interrupts */
442 		reg = READ4(sc, SDMMC_IDSTS);
443 		if (reg) {
444 			dprintf("dma intr 0x%08x\n", reg);
445 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
446 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
447 							 SDMMC_IDINTEN_RI));
448 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
449 				dma_done(sc, cmd);
450 			}
451 		}
452 	}
453 
454 	dwmmc_tasklet(sc);
455 
456 	DWMMC_UNLOCK(sc);
457 }
458 
459 static void
460 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
461 {
462 	bool was_present;
463 
464 	if (dumping || SCHEDULER_STOPPED())
465 		return;
466 
467 	was_present = sc->child != NULL;
468 
469 	if (!was_present && is_present) {
470 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
471 		  &sc->card_delayed_task, -(hz / 2));
472 	} else if (was_present && !is_present) {
473 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
474 	}
475 }
476 
477 static void
478 dwmmc_card_task(void *arg, int pending __unused)
479 {
480 	struct dwmmc_softc *sc = arg;
481 
482 #ifdef MMCCAM
483 	mmc_cam_sim_discover(&sc->mmc_sim);
484 #else
485 	DWMMC_LOCK(sc);
486 
487 	if (READ4(sc, SDMMC_CDETECT) == 0 ||
488 	    (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
489 		if (sc->child == NULL) {
490 			if (bootverbose)
491 				device_printf(sc->dev, "Card inserted\n");
492 
493 			sc->child = device_add_child(sc->dev, "mmc", -1);
494 			DWMMC_UNLOCK(sc);
495 			if (sc->child) {
496 				device_set_ivars(sc->child, sc);
497 				(void)device_probe_and_attach(sc->child);
498 			}
499 		} else
500 			DWMMC_UNLOCK(sc);
501 	} else {
502 		/* Card isn't present, detach if necessary */
503 		if (sc->child != NULL) {
504 			if (bootverbose)
505 				device_printf(sc->dev, "Card removed\n");
506 
507 			DWMMC_UNLOCK(sc);
508 			device_delete_child(sc->dev, sc->child);
509 			sc->child = NULL;
510 		} else
511 			DWMMC_UNLOCK(sc);
512 	}
513 #endif /* MMCCAM */
514 }
515 
516 static int
517 parse_fdt(struct dwmmc_softc *sc)
518 {
519 	pcell_t dts_value[3];
520 	phandle_t node;
521 	uint32_t bus_hz = 0;
522 	int len;
523 #ifdef EXT_RESOURCES
524 	int error;
525 #endif
526 
527 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
528 		return (ENXIO);
529 
530 	/* Set some defaults for the frequency range and supported modes */
531 	sc->host.f_min = 400000;
532 	sc->host.f_max = 200000000;
533 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
534 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
535 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
536 
537 	/* fifo-depth */
538 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
539 		OF_getencprop(node, "fifo-depth", dts_value, len);
540 		sc->fifo_depth = dts_value[0];
541 	}
542 
543 	/* num-slots (Deprecated) */
544 	sc->num_slots = 1;
545 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
546 		device_printf(sc->dev, "num-slots property is deprecated\n");
547 		OF_getencprop(node, "num-slots", dts_value, len);
548 		sc->num_slots = dts_value[0];
549 	}
550 
551 	/* clock-frequency */
552 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
553 		OF_getencprop(node, "clock-frequency", dts_value, len);
554 		bus_hz = dts_value[0];
555 	}
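
	/*
	 * Hypothetical DTS fragment illustrating the properties parsed above
	 * (node name, compatible string and values are examples only):
	 *
	 *	mmc0: dwmmc@ff704000 {
	 *		compatible = "snps,dw-mshc";
	 *		fifo-depth = <0x400>;
	 *		clock-frequency = <50000000>;
	 *		bus-width = <4>;
	 *	};
	 */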
556 
557 #ifdef EXT_RESOURCES
558 
559 	/* IP block reset is optional */
560 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
561 	if (error != 0 &&
562 	    error != ENOENT &&
563 	    error != ENODEV) {
564 		device_printf(sc->dev, "Cannot get reset\n");
565 		goto fail;
566 	}
567 
568 	/* vmmc regulator is optional */
569 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
570 	     &sc->vmmc);
571 	if (error != 0 &&
572 	    error != ENOENT &&
573 	    error != ENODEV) {
574 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
575 		goto fail;
576 	}
577 
578 	/* vqmmc regulator is optional */
579 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
580 	     &sc->vqmmc);
581 	if (error != 0 &&
582 	    error != ENOENT &&
583 	    error != ENODEV) {
584 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
585 		goto fail;
586 	}
587 
588 	/* Assert reset first */
589 	if (sc->hwreset != NULL) {
590 		error = hwreset_assert(sc->hwreset);
591 		if (error != 0) {
592 			device_printf(sc->dev, "Cannot assert reset\n");
593 			goto fail;
594 		}
595 	}
596 
597 	/* BIU (Bus Interface Unit clock) is optional */
598 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
599 	if (error != 0 &&
600 	    error != ENOENT &&
601 	    error != ENODEV) {
602 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
603 		goto fail;
604 	}
605 
606 	if (sc->biu) {
607 		error = clk_enable(sc->biu);
608 		if (error != 0) {
609 			device_printf(sc->dev, "cannot enable biu clock\n");
610 			goto fail;
611 		}
612 	}
613 
614 	/*
615 	 * CIU (Controller Interface Unit clock) is mandatory
616 	 * if no clock-frequency property is given
617 	 */
618 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
619 	if (error != 0 &&
620 	    error != ENOENT &&
621 	    error != ENODEV) {
622 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
623 		goto fail;
624 	}
625 
626 	if (sc->ciu) {
627 		if (bus_hz != 0) {
628 			error = clk_set_freq(sc->ciu, bus_hz, 0);
629 			if (error != 0)
630 				device_printf(sc->dev,
631 				    "cannot set ciu clock to %u\n", bus_hz);
632 		}
633 		error = clk_enable(sc->ciu);
634 		if (error != 0) {
635 			device_printf(sc->dev, "cannot enable ciu clock\n");
636 			goto fail;
637 		}
638 		clk_get_freq(sc->ciu, &sc->bus_hz);
639 	}
640 
641 	/* Enable regulators */
642 	if (sc->vmmc != NULL) {
643 		error = regulator_enable(sc->vmmc);
644 		if (error != 0) {
645 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
646 			goto fail;
647 		}
648 	}
649 	if (sc->vqmmc != NULL) {
650 		error = regulator_enable(sc->vqmmc);
651 		if (error != 0) {
652 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
653 			goto fail;
654 		}
655 	}
656 
657 	/* Take dwmmc out of reset */
658 	if (sc->hwreset != NULL) {
659 		error = hwreset_deassert(sc->hwreset);
660 		if (error != 0) {
661 			device_printf(sc->dev, "Cannot deassert reset\n");
662 			goto fail;
663 		}
664 	}
665 #endif /* EXT_RESOURCES */
666 
667 	if (sc->bus_hz == 0) {
668 		device_printf(sc->dev, "No bus speed provided\n");
669 		goto fail;
670 	}
671 
672 	return (0);
673 
674 fail:
675 	return (ENXIO);
676 }
677 
678 int
679 dwmmc_attach(device_t dev)
680 {
681 	struct dwmmc_softc *sc;
682 	int error;
683 
684 	sc = device_get_softc(dev);
685 
686 	sc->dev = dev;
687 
688 	/* Use Auto Stop; it saves hundreds of interrupts per second. */
689 	sc->use_auto_stop = 1;
690 
691 	error = parse_fdt(sc);
692 	if (error != 0) {
693 		device_printf(dev, "Can't get FDT property.\n");
694 		return (ENXIO);
695 	}
696 
697 	DWMMC_LOCK_INIT(sc);
698 
699 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
700 		device_printf(dev, "could not allocate resources\n");
701 		return (ENXIO);
702 	}
703 
704 	/* Setup interrupt handler. */
705 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
706 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
707 	if (error != 0) {
708 		device_printf(dev, "could not setup interrupt handler.\n");
709 		return (ENXIO);
710 	}
711 
712 	device_printf(dev, "Hardware version ID is %04x\n",
713 		READ4(sc, SDMMC_VERID) & 0xffff);
714 
715 	/* Reset all */
716 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
717 				  SDMMC_CTRL_FIFO_RESET |
718 				  SDMMC_CTRL_DMA_RESET)))
719 		return (ENXIO);
720 
721 	dwmmc_setup_bus(sc, sc->host.f_min);
722 
723 	if (sc->fifo_depth == 0) {
724 		sc->fifo_depth = 1 +
725 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
726 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
727 		    sc->fifo_depth);
728 	}
729 
730 	if (!sc->use_pio) {
731 		dma_stop(sc);
732 		if (dma_setup(sc))
733 			return (ENXIO);
734 
735 		/* Install desc base */
736 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
737 
738 		/* Enable DMA interrupts */
739 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
740 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
741 					   SDMMC_IDINTEN_RI |
742 					   SDMMC_IDINTEN_TI));
743 	}
744 
745 	/* Clear and disable interrupts for now */
746 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
747 	WRITE4(sc, SDMMC_INTMASK, 0);
748 
749 	/* Maximum timeout */
750 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
751 
752 	/* Enable interrupts */
753 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
754 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
755 				   SDMMC_INTMASK_DTO |
756 				   SDMMC_INTMASK_ACD |
757 				   SDMMC_INTMASK_TXDR |
758 				   SDMMC_INTMASK_RXDR |
759 				   DWMMC_ERR_FLAGS |
760 				   SDMMC_INTMASK_CD));
761 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
762 
763 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
764 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
765 		dwmmc_card_task, sc);
766 
767 #ifdef MMCCAM
768 	sc->ccb = NULL;
769 	if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
770 		device_printf(dev, "cannot alloc cam sim\n");
771 		dwmmc_detach(dev);
772 		return (ENXIO);
773 	}
774 #endif
775 	/*
776 	 * Schedule a card detection pass, since we will not get an interrupt
777 	 * if the card was already inserted when we attached.
778 	 */
779 	dwmmc_card_task(sc, 0);
780 	return (0);
781 }
782 
783 int
784 dwmmc_detach(device_t dev)
785 {
786 	struct dwmmc_softc *sc;
787 	int ret;
788 
789 	sc = device_get_softc(dev);
790 
791 	ret = device_delete_children(dev);
792 	if (ret != 0)
793 		return (ret);
794 
795 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
796 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
797 
798 	if (sc->intr_cookie != NULL) {
799 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
800 		if (ret != 0)
801 			return (ret);
802 	}
803 	bus_release_resources(dev, dwmmc_spec, sc->res);
804 
805 	DWMMC_LOCK_DESTROY(sc);
806 
807 #ifdef EXT_RESOURCES
808 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
809 		device_printf(sc->dev, "cannot deassert reset\n");
810 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
811 		device_printf(sc->dev, "cannot disable biu clock\n");
812 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
813 		device_printf(sc->dev, "cannot disable ciu clock\n");
814 
815 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
816 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
817 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
818 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
819 #endif
820 
821 #ifdef MMCCAM
822 	mmc_cam_sim_free(&sc->mmc_sim);
823 #endif
824 
825 	return (0);
826 }
827 
828 static int
829 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
830 {
831 	int tout;
832 	int div;
833 
834 	if (freq == 0) {
835 		WRITE4(sc, SDMMC_CLKENA, 0);
836 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
837 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
838 
839 		tout = 1000;
840 		do {
841 			if (tout-- < 0) {
842 				device_printf(sc->dev, "Failed to update clk\n");
843 				return (1);
844 			}
845 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
846 
847 		return (0);
848 	}
849 
850 	WRITE4(sc, SDMMC_CLKENA, 0);
851 	WRITE4(sc, SDMMC_CLKSRC, 0);
852 
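	/*
	 * A CLKDIV value of N yields a card clock of bus_hz / (2 * N); N = 0
	 * bypasses the divider.  Worked example: with a 50 MHz controller
	 * clock and a requested 400 kHz, DIV_ROUND_UP(50000000, 800000) = 63,
	 * giving roughly 397 kHz on the card.
	 */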
853 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
854 
855 	WRITE4(sc, SDMMC_CLKDIV, div);
856 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
857 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
858 
859 	tout = 1000;
860 	do {
861 		if (tout-- < 0) {
862 			device_printf(sc->dev, "Failed to update clk\n");
863 			return (1);
864 		}
865 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
866 
867 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
868 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
869 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
870 
871 	tout = 1000;
872 	do {
873 		if (tout-- < 0) {
874 			device_printf(sc->dev, "Failed to enable clk\n");
875 			return (1);
876 		}
877 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
878 
879 	return (0);
880 }
881 
882 static int
883 dwmmc_update_ios(device_t brdev, device_t reqdev)
884 {
885 	struct dwmmc_softc *sc;
886 	struct mmc_ios *ios;
887 	uint32_t reg;
888 	int ret = 0;
889 
890 	sc = device_get_softc(brdev);
891 	ios = &sc->host.ios;
892 
893 	dprintf("Setting up clk %u bus_width %d, timing: %d\n",
894 		ios->clock, ios->bus_width, ios->timing);
895 
896 	switch (ios->power_mode) {
897 	case power_on:
898 		break;
899 	case power_off:
900 		WRITE4(sc, SDMMC_PWREN, 0);
901 		break;
902 	case power_up:
903 		WRITE4(sc, SDMMC_PWREN, 1);
904 		break;
905 	}
906 
907 	mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
908 
909 	if (ios->bus_width == bus_width_8)
910 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
911 	else if (ios->bus_width == bus_width_4)
912 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
913 	else
914 		WRITE4(sc, SDMMC_CTYPE, 0);
915 
916 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
917 		/* XXX: take care about DDR or SDR use here */
918 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
919 	}
920 
921 	/* Set DDR mode */
922 	reg = READ4(sc, SDMMC_UHS_REG);
923 	if (ios->timing == bus_timing_uhs_ddr50 ||
924 	    ios->timing == bus_timing_mmc_ddr52 ||
925 	    ios->timing == bus_timing_mmc_hs400)
926 		reg |= (SDMMC_UHS_REG_DDR);
927 	else
928 		reg &= ~(SDMMC_UHS_REG_DDR);
929 	WRITE4(sc, SDMMC_UHS_REG, reg);
930 
931 	if (sc->update_ios)
932 		ret = sc->update_ios(sc, ios);
933 
934 	dwmmc_setup_bus(sc, ios->clock);
935 
936 	return (ret);
937 }
938 
939 static int
940 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
941 {
942 	struct mmc_data *data;
943 
944 	data = cmd->data;
945 
946 	if (data->flags & MMC_DATA_WRITE)
947 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
948 			BUS_DMASYNC_POSTWRITE);
949 	else
950 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
951 			BUS_DMASYNC_POSTREAD);
952 
953 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
954 	    BUS_DMASYNC_POSTWRITE);
955 
956 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
957 
958 	return (0);
959 }
960 
961 static int
962 dma_stop(struct dwmmc_softc *sc)
963 {
964 	int reg;
965 
966 	reg = READ4(sc, SDMMC_CTRL);
967 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
968 	reg |= (SDMMC_CTRL_DMA_RESET);
969 	WRITE4(sc, SDMMC_CTRL, reg);
970 
971 	reg = READ4(sc, SDMMC_BMOD);
972 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
973 	reg |= (SDMMC_BMOD_SWR);
974 	WRITE4(sc, SDMMC_BMOD, reg);
975 
976 	return (0);
977 }
978 
979 static int
980 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
981 {
982 	struct mmc_data *data;
983 	int err;
984 	int reg;
985 
986 	data = cmd->data;
987 
988 	reg = READ4(sc, SDMMC_INTMASK);
989 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
990 	WRITE4(sc, SDMMC_INTMASK, reg);
991 	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
992 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
993 		data->data, data->len, dwmmc_ring_setup,
994 		sc, BUS_DMA_NOWAIT);
995 	if (err != 0)
996 		panic("dmamap_load failed\n");
997 
998 	/* Ensure the device can see the desc */
999 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
1000 	    BUS_DMASYNC_PREWRITE);
1001 
1002 	if (data->flags & MMC_DATA_WRITE)
1003 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1004 			BUS_DMASYNC_PREWRITE);
1005 	else
1006 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1007 			BUS_DMASYNC_PREREAD);
1008 
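	/*
	 * Program the FIFO watermarks around half of the FIFO.  Worked
	 * example (assuming fifo_depth = 64): RX watermark = 64/2 - 1 = 31,
	 * TX watermark = 64/2 = 32, with a burst size of DEF_MSIZE.
	 */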
1009 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1010 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1011 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1012 
1013 	WRITE4(sc, SDMMC_FIFOTH, reg);
1014 	wmb();
1015 
1016 	reg = READ4(sc, SDMMC_CTRL);
1017 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1018 	WRITE4(sc, SDMMC_CTRL, reg);
1019 	wmb();
1020 
1021 	reg = READ4(sc, SDMMC_BMOD);
1022 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1023 	WRITE4(sc, SDMMC_BMOD, reg);
1024 
1025 	/* Start */
1026 	WRITE4(sc, SDMMC_PLDMND, 1);
1027 
1028 	return (0);
1029 }
1030 
1031 static int
1032 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1033 {
1034 	struct mmc_data *data;
1035 	int reg;
1036 
1037 	data = cmd->data;
1038 	data->xfer_len = 0;
1039 
1040 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1041 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1042 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1043 
1044 	WRITE4(sc, SDMMC_FIFOTH, reg);
1045 	wmb();
1046 
1047 	return (0);
1048 }
1049 
1050 static void
1051 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1052 {
1053 	struct mmc_data *data;
1054 	uint32_t *p, status;
1055 
1056 	if (cmd == NULL || cmd->data == NULL)
1057 		return;
1058 
1059 	data = cmd->data;
1060 	if ((data->flags & MMC_DATA_READ) == 0)
1061 		return;
1062 
1063 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1064 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1065 
1066 	while (data->xfer_len < data->len) {
1067 		status = READ4(sc, SDMMC_STATUS);
1068 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1069 			break;
1070 		*p++ = READ4(sc, SDMMC_DATA);
1071 		data->xfer_len += 4;
1072 	}
1073 
1074 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1075 }
1076 
1077 static void
1078 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1079 {
1080 	struct mmc_data *data;
1081 	uint32_t *p, status;
1082 
1083 	if (cmd == NULL || cmd->data == NULL)
1084 		return;
1085 
1086 	data = cmd->data;
1087 	if ((data->flags & MMC_DATA_WRITE) == 0)
1088 		return;
1089 
1090 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1091 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1092 
1093 	while (data->xfer_len < data->len) {
1094 		status = READ4(sc, SDMMC_STATUS);
1095 		if (status & SDMMC_STATUS_FIFO_FULL)
1096 			break;
1097 		WRITE4(sc, SDMMC_DATA, *p++);
1098 		data->xfer_len += 4;
1099 	}
1100 
1101 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1102 }
1103 
1104 static void
1105 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1106 {
1107 	struct mmc_data *data;
1108 	uint32_t blksz;
1109 	uint32_t cmdr;
1110 
1111 	dprintf("%s\n", __func__);
1112 	sc->curcmd = cmd;
1113 	data = cmd->data;
1114 
1115 #ifndef MMCCAM
1116 	/* XXX Upper layers don't always set this */
1117 	cmd->mrq = sc->req;
1118 #endif
1119 	/* Begin setting up command register. */
1120 
1121 	cmdr = cmd->opcode;
1122 
1123 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1124 
1125 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1126 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1127 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1128 		cmdr |= SDMMC_CMD_STOP_ABORT;
1129 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1130 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1131 
1132 	/* Set up response handling. */
1133 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1134 		cmdr |= SDMMC_CMD_RESP_EXP;
1135 		if (cmd->flags & MMC_RSP_136)
1136 			cmdr |= SDMMC_CMD_RESP_LONG;
1137 	}
1138 
1139 	if (cmd->flags & MMC_RSP_CRC)
1140 		cmdr |= SDMMC_CMD_RESP_CRC;
1141 
1142 	/*
1143 	 * XXX: Not all platforms want this.
1144 	 */
1145 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1146 
1147 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1148 		sc->flags |= (CARD_INIT_DONE);
1149 		cmdr |= SDMMC_CMD_SEND_INIT;
1150 	}
1151 
1152 	if (data) {
1153 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1154 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1155 		     sc->use_auto_stop)
1156 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1157 
1158 		cmdr |= SDMMC_CMD_DATA_EXP;
1159 		if (data->flags & MMC_DATA_STREAM)
1160 			cmdr |= SDMMC_CMD_MODE_STREAM;
1161 		if (data->flags & MMC_DATA_WRITE)
1162 			cmdr |= SDMMC_CMD_DATA_WRITE;
1163 
1164 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1165 #ifdef MMCCAM
1166 		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1167 			WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1168 			WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1169 		} else
1170 #endif
1171 		{
1172 			WRITE4(sc, SDMMC_BYTCNT, data->len);
1173 			blksz = (data->len < MMC_SECTOR_SIZE) ? \
1174 				data->len : MMC_SECTOR_SIZE;
1175 			WRITE4(sc, SDMMC_BLKSIZ, blksz);
1176 		}
1177 
1178 		if (sc->use_pio) {
1179 			pio_prepare(sc, cmd);
1180 		} else {
1181 			dma_prepare(sc, cmd);
1182 		}
1183 		wmb();
1184 	}
1185 
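	/*
	 * For illustration: a typical single-block read (CMD17) ends up with
	 * cmdr = opcode | SDMMC_CMD_WAIT_PRVDATA | SDMMC_CMD_RESP_EXP |
	 * SDMMC_CMD_RESP_CRC | SDMMC_CMD_USE_HOLD_REG | SDMMC_CMD_DATA_EXP,
	 * plus SDMMC_CMD_SEND_INIT on the very first command after attach.
	 */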
1186 	dprintf("cmdr 0x%08x\n", cmdr);
1187 
1188 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1189 	wmb();
1190 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1191 }
1192 
1193 static void
1194 dwmmc_next_operation(struct dwmmc_softc *sc)
1195 {
1196 	struct mmc_command *cmd;
1197 	dprintf("%s\n", __func__);
1198 #ifdef MMCCAM
1199 	union ccb *ccb;
1200 
1201 	ccb = sc->ccb;
1202 	if (ccb == NULL)
1203 		return;
1204 	cmd = &ccb->mmcio.cmd;
1205 #else
1206 	struct mmc_request *req;
1207 
1208 	req = sc->req;
1209 	if (req == NULL)
1210 		return;
1211 	cmd = req->cmd;
1212 #endif
1213 
1214 	sc->acd_rcvd = 0;
1215 	sc->dto_rcvd = 0;
1216 	sc->cmd_done = 0;
1217 
1218 	/*
1219 	 * XXX: Wait while the card is still busy.
1220 	 * We need this to prevent data timeouts,
1221 	 * mostly caused by a multi-block write command
1222 	 * followed by a single-block read.
1223 	 */
1224 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1225 		continue;
1226 
1227 	if (sc->flags & PENDING_CMD) {
1228 		sc->flags &= ~PENDING_CMD;
1229 		dwmmc_start_cmd(sc, cmd);
1230 		return;
1231 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1232 		sc->flags &= ~PENDING_STOP;
1233 		/// XXX: What to do with this?
1234 		//dwmmc_start_cmd(sc, req->stop);
1235 		return;
1236 	}
1237 
1238 #ifdef MMCCAM
1239 	sc->ccb = NULL;
1240 	sc->curcmd = NULL;
1241 	ccb->ccb_h.status =
1242 		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1243 	xpt_done(ccb);
1244 #else
1245 	sc->req = NULL;
1246 	sc->curcmd = NULL;
1247 	req->done(req);
1248 #endif
1249 }
1250 
1251 static int
1252 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1253 {
1254 	struct dwmmc_softc *sc;
1255 
1256 	sc = device_get_softc(brdev);
1257 
1258 	dprintf("%s\n", __func__);
1259 
1260 	DWMMC_LOCK(sc);
1261 
1262 #ifdef MMCCAM
1263 	sc->flags |= PENDING_CMD;
1264 #else
1265 	if (sc->req != NULL) {
1266 		DWMMC_UNLOCK(sc);
1267 		return (EBUSY);
1268 	}
1269 
1270 	sc->req = req;
1271 	sc->flags |= PENDING_CMD;
1272 	if (sc->req->stop)
1273 		sc->flags |= PENDING_STOP;
1274 #endif
1275 	dwmmc_next_operation(sc);
1276 
1277 	DWMMC_UNLOCK(sc);
1278 	return (0);
1279 }
1280 
1281 #ifndef MMCCAM
1282 static int
1283 dwmmc_get_ro(device_t brdev, device_t reqdev)
1284 {
1285 
1286 	dprintf("%s\n", __func__);
1287 
1288 	return (0);
1289 }
1290 
1291 static int
1292 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1293 {
1294 	struct dwmmc_softc *sc;
1295 
1296 	sc = device_get_softc(brdev);
1297 
1298 	DWMMC_LOCK(sc);
1299 	while (sc->bus_busy)
1300 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1301 	sc->bus_busy++;
1302 	DWMMC_UNLOCK(sc);
1303 	return (0);
1304 }
1305 
1306 static int
1307 dwmmc_release_host(device_t brdev, device_t reqdev)
1308 {
1309 	struct dwmmc_softc *sc;
1310 
1311 	sc = device_get_softc(brdev);
1312 
1313 	DWMMC_LOCK(sc);
1314 	sc->bus_busy--;
1315 	wakeup(sc);
1316 	DWMMC_UNLOCK(sc);
1317 	return (0);
1318 }
1319 #endif	/* !MMCCAM */
1320 
1321 static int
1322 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1323 {
1324 	struct dwmmc_softc *sc;
1325 
1326 	sc = device_get_softc(bus);
1327 
1328 	switch (which) {
1329 	default:
1330 		return (EINVAL);
1331 	case MMCBR_IVAR_BUS_MODE:
1332 		*(int *)result = sc->host.ios.bus_mode;
1333 		break;
1334 	case MMCBR_IVAR_BUS_WIDTH:
1335 		*(int *)result = sc->host.ios.bus_width;
1336 		break;
1337 	case MMCBR_IVAR_CHIP_SELECT:
1338 		*(int *)result = sc->host.ios.chip_select;
1339 		break;
1340 	case MMCBR_IVAR_CLOCK:
1341 		*(int *)result = sc->host.ios.clock;
1342 		break;
1343 	case MMCBR_IVAR_F_MIN:
1344 		*(int *)result = sc->host.f_min;
1345 		break;
1346 	case MMCBR_IVAR_F_MAX:
1347 		*(int *)result = sc->host.f_max;
1348 		break;
1349 	case MMCBR_IVAR_HOST_OCR:
1350 		*(int *)result = sc->host.host_ocr;
1351 		break;
1352 	case MMCBR_IVAR_MODE:
1353 		*(int *)result = sc->host.mode;
1354 		break;
1355 	case MMCBR_IVAR_OCR:
1356 		*(int *)result = sc->host.ocr;
1357 		break;
1358 	case MMCBR_IVAR_POWER_MODE:
1359 		*(int *)result = sc->host.ios.power_mode;
1360 		break;
1361 	case MMCBR_IVAR_VDD:
1362 		*(int *)result = sc->host.ios.vdd;
1363 		break;
1364 	case MMCBR_IVAR_VCCQ:
1365 		*(int *)result = sc->host.ios.vccq;
1366 		break;
1367 	case MMCBR_IVAR_CAPS:
1368 		*(int *)result = sc->host.caps;
1369 		break;
1370 	case MMCBR_IVAR_MAX_DATA:
1371 		*(int *)result = DWMMC_MAX_DATA;
1372 		break;
1373 	case MMCBR_IVAR_TIMING:
1374 		*(int *)result = sc->host.ios.timing;
1375 		break;
1376 	}
1377 	return (0);
1378 }
1379 
1380 static int
1381 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1382 {
1383 	struct dwmmc_softc *sc;
1384 
1385 	sc = device_get_softc(bus);
1386 
1387 	switch (which) {
1388 	default:
1389 		return (EINVAL);
1390 	case MMCBR_IVAR_BUS_MODE:
1391 		sc->host.ios.bus_mode = value;
1392 		break;
1393 	case MMCBR_IVAR_BUS_WIDTH:
1394 		sc->host.ios.bus_width = value;
1395 		break;
1396 	case MMCBR_IVAR_CHIP_SELECT:
1397 		sc->host.ios.chip_select = value;
1398 		break;
1399 	case MMCBR_IVAR_CLOCK:
1400 		sc->host.ios.clock = value;
1401 		break;
1402 	case MMCBR_IVAR_MODE:
1403 		sc->host.mode = value;
1404 		break;
1405 	case MMCBR_IVAR_OCR:
1406 		sc->host.ocr = value;
1407 		break;
1408 	case MMCBR_IVAR_POWER_MODE:
1409 		sc->host.ios.power_mode = value;
1410 		break;
1411 	case MMCBR_IVAR_VDD:
1412 		sc->host.ios.vdd = value;
1413 		break;
1414 	case MMCBR_IVAR_TIMING:
1415 		sc->host.ios.timing = value;
1416 		break;
1417 	case MMCBR_IVAR_VCCQ:
1418 		sc->host.ios.vccq = value;
1419 		break;
1420 	/* These are read-only */
1421 	case MMCBR_IVAR_CAPS:
1422 	case MMCBR_IVAR_HOST_OCR:
1423 	case MMCBR_IVAR_F_MIN:
1424 	case MMCBR_IVAR_F_MAX:
1425 	case MMCBR_IVAR_MAX_DATA:
1426 		return (EINVAL);
1427 	}
1428 	return (0);
1429 }
1430 
1431 #ifdef MMCCAM
1432 /* Note: this function likely belongs to the specific driver impl */
1433 static int
1434 dwmmc_switch_vccq(device_t dev, device_t child)
1435 {
1436 	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
1437 	return EINVAL;
1438 	return (EINVAL);
1439 
1440 static int
1441 dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1442 {
1443 	struct dwmmc_softc *sc;
1444 
1445 	sc = device_get_softc(dev);
1446 
1447 	cts->host_ocr = sc->host.host_ocr;
1448 	cts->host_f_min = sc->host.f_min;
1449 	cts->host_f_max = sc->host.f_max;
1450 	cts->host_caps = sc->host.caps;
1451 	cts->host_max_data = DWMMC_MAX_DATA;
1452 	memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1453 
1454 	return (0);
1455 }
1456 
1457 static int
1458 dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1459 {
1460 	struct dwmmc_softc *sc;
1461 	struct mmc_ios *ios;
1462 	struct mmc_ios *new_ios;
1463 	int res;
1464 
1465 	sc = device_get_softc(dev);
1466 	ios = &sc->host.ios;
1467 
1468 	new_ios = &cts->ios;
1469 
1470 	/* Update only requested fields */
1471 	if (cts->ios_valid & MMC_CLK) {
1472 		ios->clock = new_ios->clock;
1473 		if (bootverbose)
1474 			device_printf(sc->dev, "Clock => %d\n", ios->clock);
1475 	}
1476 	if (cts->ios_valid & MMC_VDD) {
1477 		ios->vdd = new_ios->vdd;
1478 		if (bootverbose)
1479 			device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1480 	}
1481 	if (cts->ios_valid & MMC_CS) {
1482 		ios->chip_select = new_ios->chip_select;
1483 		if (bootverbose)
1484 			device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1485 	}
1486 	if (cts->ios_valid & MMC_BW) {
1487 		ios->bus_width = new_ios->bus_width;
1488 		if (bootverbose)
1489 			device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1490 	}
1491 	if (cts->ios_valid & MMC_PM) {
1492 		ios->power_mode = new_ios->power_mode;
1493 		if (bootverbose)
1494 			device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1495 	}
1496 	if (cts->ios_valid & MMC_BT) {
1497 		ios->timing = new_ios->timing;
1498 		if (bootverbose)
1499 			device_printf(sc->dev, "Timing => %d\n", ios->timing);
1500 	}
1501 	if (cts->ios_valid & MMC_BM) {
1502 		ios->bus_mode = new_ios->bus_mode;
1503 		if (bootverbose)
1504 			device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1505 	}
1506 	if (cts->ios_valid & MMC_VCCQ) {
1507 		ios->vccq = new_ios->vccq;
1508 		if (bootverbose)
1509 			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1510 		res = dwmmc_switch_vccq(sc->dev, NULL);
1511 		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1512 	}
1513 
1514 	return (dwmmc_update_ios(sc->dev, NULL));
1515 }
1516 
1517 static int
1518 dwmmc_cam_request(device_t dev, union ccb *ccb)
1519 {
1520 	struct dwmmc_softc *sc;
1521 	struct ccb_mmcio *mmcio;
1522 
1523 	sc = device_get_softc(dev);
1524 	mmcio = &ccb->mmcio;
1525 
1526 	DWMMC_LOCK(sc);
1527 
1528 #ifdef DEBUG
1529 	if (__predict_false(bootverbose)) {
1530 		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1531 			    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1532 			    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1533 			    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1534 	}
1535 #endif
1536 	if (mmcio->cmd.data != NULL) {
1537 		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1538 			panic("data->len = %d, data->flags = %d -- something is b0rked",
1539 			      (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1540 	}
1541 	if (sc->ccb != NULL) {
1542 		device_printf(sc->dev, "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
1543 		return (EBUSY);
1544 	}
1545 	sc->ccb = ccb;
1546 	DWMMC_UNLOCK(sc);
1547 	dwmmc_request(sc->dev, NULL, NULL);
1548 
1549 	return (0);
1550 }
1551 
1552 static void
1553 dwmmc_cam_poll(device_t dev)
1554 {
1555 	struct dwmmc_softc *sc;
1556 
1557 	sc = device_get_softc(dev);
1558 	dwmmc_intr(sc);
1559 }
1560 #endif /* MMCCAM */
1561 
1562 static device_method_t dwmmc_methods[] = {
1563 	/* Bus interface */
1564 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1565 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1566 
1567 #ifndef MMCCAM
1568 	/* mmcbr_if */
1569 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1570 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1571 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1572 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1573 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1574 #endif
1575 
1576 #ifdef MMCCAM
1577 	/* MMCCAM interface */
1578 	DEVMETHOD(mmc_sim_get_tran_settings,	dwmmc_get_tran_settings),
1579 	DEVMETHOD(mmc_sim_set_tran_settings,	dwmmc_set_tran_settings),
1580 	DEVMETHOD(mmc_sim_cam_request,		dwmmc_cam_request),
1581 	DEVMETHOD(mmc_sim_cam_poll,		dwmmc_cam_poll),
1582 
1583 	DEVMETHOD(bus_add_child,		bus_generic_add_child),
1584 #endif
1585 
1586 	DEVMETHOD_END
1587 };
1588 
1589 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1590     sizeof(struct dwmmc_softc));
1591