xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 3422ca83ba48e5c9174542a2d3ba8225275779a6)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/rman.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/mmc/mmc_fdt_helpers.h>
54 
55 #include <dev/fdt/fdt_common.h>
56 #include <dev/ofw/openfirm.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 
60 #include <machine/bus.h>
61 #include <machine/cpu.h>
62 #include <machine/intr.h>
63 
64 #ifdef EXT_RESOURCES
65 #include <dev/extres/clk/clk.h>
66 #endif
67 
68 #include <dev/mmc/host/dwmmc_reg.h>
69 #include <dev/mmc/host/dwmmc_var.h>
70 
71 #include "opt_mmccam.h"
72 
73 #ifdef MMCCAM
74 #include <cam/cam.h>
75 #include <cam/cam_ccb.h>
76 #include <cam/cam_debug.h>
77 #include <cam/cam_sim.h>
78 #include <cam/cam_xpt_sim.h>
79 #endif
80 
81 #include "mmcbr_if.h"
82 
83 #ifdef DEBUG
84 #define dprintf(fmt, args...) printf(fmt, ##args)
85 #else
#define dprintf(fmt, args...)
87 #endif
88 
89 #define	READ4(_sc, _reg) \
90 	bus_read_4((_sc)->res[0], _reg)
91 #define	WRITE4(_sc, _reg, _val) \
92 	bus_write_4((_sc)->res[0], _reg, _val)
93 
94 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
95 
96 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
97 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
98 #define	DWMMC_LOCK_INIT(_sc) \
99 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
100 	    "dwmmc", MTX_DEF)
101 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
102 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
103 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
104 
105 #define	PENDING_CMD	0x01
106 #define	PENDING_STOP	0x02
107 #define	CARD_INIT_DONE	0x04
108 
109 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
110 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
111 				|SDMMC_INTMASK_EBE)
112 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
113 				|SDMMC_INTMASK_RE)
114 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
115 				|SDMMC_INTMASK_HLE)
116 
117 #define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
118 #define	DES0_LD		(1 << 2)	/* Last Descriptor */
119 #define	DES0_FS		(1 << 3)	/* First Descriptor */
120 #define	DES0_CH		(1 << 4)	/* second address CHained */
121 #define	DES0_ER		(1 << 5)	/* End of Ring */
122 #define	DES0_CES	(1 << 30)	/* Card Error Summary */
123 #define	DES0_OWN	(1 << 31)	/* OWN */
124 
125 #define	DES1_BS1_MASK	0x1fff
126 
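/*
 * IDMAC (internal DMA controller) descriptor, dual-buffer format.  The
 * driver uses the chained mode (DES0_CH), so des3 carries the physical
 * address of the next descriptor rather than a second buffer.
 */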
127 struct idmac_desc {
128 	uint32_t	des0;	/* control */
129 	uint32_t	des1;	/* bufsize */
130 	uint32_t	des2;	/* buf1 phys addr */
131 	uint32_t	des3;	/* buf2 phys addr or next descr */
132 };
133 
134 #define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
135 #define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
#define	DEF_MSIZE	0x2	/* DMA multiple-transaction (burst) size */
137 #define	IDMAC_MAX_SIZE	4096
138 
139 static void dwmmc_next_operation(struct dwmmc_softc *);
140 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
141 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
142 static int dma_stop(struct dwmmc_softc *);
143 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
144 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
145 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
146 static int dwmmc_switch_vccq(device_t, device_t);
147 #ifdef MMCCAM
148 static void dwmmc_cam_action(struct cam_sim *, union ccb *);
149 static void dwmmc_cam_poll(struct cam_sim *);
150 static int dwmmc_cam_settran_settings(struct dwmmc_softc *, union ccb *);
151 static int dwmmc_cam_request(struct dwmmc_softc *, union ccb *);
152 static void dwmmc_cam_handle_mmcio(struct cam_sim *, union ccb *);
153 #endif
154 
155 static struct resource_spec dwmmc_spec[] = {
156 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
157 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
158 	{ -1, 0 }
159 };
160 
161 #define	HWTYPE_MASK		(0x0000ffff)
162 #define	HWFLAG_MASK		(0xffff << 16)
163 
164 static void
165 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
166 {
167 
168 	if (error != 0)
169 		return;
170 	*(bus_addr_t *)arg = segs[0].ds_addr;
171 }
172 
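/*
 * bus_dmamap_load() callback: translate the DMA segments of the current
 * transfer into IDMAC descriptors.  The first segment is tagged DES0_FS;
 * the last one is tagged DES0_LD and has chaining and interrupt
 * suppression cleared so that a single completion interrupt is raised.
 */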
173 static void
174 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
175 {
176 	struct dwmmc_softc *sc;
177 	int idx;
178 
179 	if (error != 0)
180 		return;
181 
182 	sc = arg;
183 
184 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
185 
186 	for (idx = 0; idx < nsegs; idx++) {
187 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
188 		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
189 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
190 
191 		if (idx == 0)
192 			sc->desc_ring[idx].des0 |= DES0_FS;
193 
194 		if (idx == (nsegs - 1)) {
195 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
196 			sc->desc_ring[idx].des0 |= DES0_LD;
197 		}
198 	}
199 }
200 
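/*
 * Set the requested reset bits in CTRL and poll until the hardware clears
 * them again; the bits are self-clearing once the reset has completed.
 */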
201 static int
202 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
203 {
204 	int reg;
205 	int i;
206 
207 	reg = READ4(sc, SDMMC_CTRL);
208 	reg |= (reset_bits);
209 	WRITE4(sc, SDMMC_CTRL, reg);
210 
	/* Wait for the reset to complete */
212 	for (i = 0; i < 100; i++) {
213 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
214 			return (0);
215 		DELAY(10);
216 	}
217 
218 	device_printf(sc->dev, "Reset failed\n");
219 
220 	return (1);
221 }
222 
223 static int
224 dma_setup(struct dwmmc_softc *sc)
225 {
226 	int error;
227 	int nidx;
228 	int idx;
229 
230 	/*
	 * Set up the IDMAC descriptor ring and DMA maps.
232 	 */
233 	error = bus_dma_tag_create(
234 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
235 	    4096, 0,			/* alignment, boundary */
236 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
237 	    BUS_SPACE_MAXADDR,		/* highaddr */
238 	    NULL, NULL,			/* filter, filterarg */
239 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
240 	    IDMAC_DESC_SIZE,		/* maxsegsize */
241 	    0,				/* flags */
242 	    NULL, NULL,			/* lockfunc, lockarg */
243 	    &sc->desc_tag);
244 	if (error != 0) {
245 		device_printf(sc->dev,
246 		    "could not create ring DMA tag.\n");
247 		return (1);
248 	}
249 
250 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
251 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
252 	    &sc->desc_map);
253 	if (error != 0) {
254 		device_printf(sc->dev,
255 		    "could not allocate descriptor ring.\n");
256 		return (1);
257 	}
258 
259 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
260 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
261 	    &sc->desc_ring_paddr, 0);
262 	if (error != 0) {
263 		device_printf(sc->dev,
264 		    "could not load descriptor ring map.\n");
265 		return (1);
266 	}
267 
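	/*
	 * Pre-link the descriptors into a ring; des1 (buffer size) and des2
	 * (buffer address) are filled in per transfer by dwmmc_ring_setup().
	 */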
268 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
269 		sc->desc_ring[idx].des0 = DES0_CH;
270 		sc->desc_ring[idx].des1 = 0;
271 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
272 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
273 		    (nidx * sizeof(struct idmac_desc));
274 	}
275 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
276 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
277 
278 	error = bus_dma_tag_create(
279 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
280 	    CACHE_LINE_SIZE, 0,		/* alignment, boundary */
281 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
282 	    BUS_SPACE_MAXADDR,		/* highaddr */
283 	    NULL, NULL,			/* filter, filterarg */
284 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
285 	    IDMAC_DESC_SEGS,		/* nsegments */
286 	    IDMAC_MAX_SIZE,		/* maxsegsize */
287 	    0,				/* flags */
288 	    NULL, NULL,			/* lockfunc, lockarg */
289 	    &sc->buf_tag);
290 	if (error != 0) {
291 		device_printf(sc->dev,
		    "could not create buffer DMA tag.\n");
293 		return (1);
294 	}
295 
296 	error = bus_dmamap_create(sc->buf_tag, 0,
297 	    &sc->buf_map);
298 	if (error != 0) {
299 		device_printf(sc->dev,
		    "could not create buffer DMA map.\n");
301 		return (1);
302 	}
303 
304 	return (0);
305 }
306 
307 static void
308 dwmmc_cmd_done(struct dwmmc_softc *sc)
309 {
310 	struct mmc_command *cmd;
311 #ifdef MMCCAM
312 	union ccb *ccb;
313 #endif
314 
315 #ifdef MMCCAM
316 	ccb = sc->ccb;
317 	if (ccb == NULL)
318 		return;
319 	cmd = &ccb->mmcio.cmd;
320 #else
321 	cmd = sc->curcmd;
322 #endif
323 	if (cmd == NULL)
324 		return;
325 
326 	if (cmd->flags & MMC_RSP_PRESENT) {
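		/*
		 * The mmc layer expects the most significant word of a long
		 * response in resp[0], while the controller places it in
		 * RESP3, hence the reversed copy below.
		 */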
327 		if (cmd->flags & MMC_RSP_136) {
328 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
329 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
330 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
331 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
332 		} else {
333 			cmd->resp[3] = 0;
334 			cmd->resp[2] = 0;
335 			cmd->resp[1] = 0;
336 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
337 		}
338 	}
339 }
340 
341 static void
342 dwmmc_tasklet(struct dwmmc_softc *sc)
343 {
344 	struct mmc_command *cmd;
345 
346 	cmd = sc->curcmd;
347 	if (cmd == NULL)
348 		return;
349 
350 	if (!sc->cmd_done)
351 		return;
352 
353 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
354 		dwmmc_next_operation(sc);
355 	} else if (cmd->data && sc->dto_rcvd) {
356 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
357 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
358 		     sc->use_auto_stop) {
359 			if (sc->acd_rcvd)
360 				dwmmc_next_operation(sc);
361 		} else {
362 			dwmmc_next_operation(sc);
363 		}
364 	}
365 }
366 
367 static void
368 dwmmc_intr(void *arg)
369 {
370 	struct mmc_command *cmd;
371 	struct dwmmc_softc *sc;
372 	uint32_t reg;
373 
374 	sc = arg;
375 
376 	DWMMC_LOCK(sc);
377 
378 	cmd = sc->curcmd;
379 
380 	/* First handle SDMMC controller interrupts */
381 	reg = READ4(sc, SDMMC_MINTSTS);
382 	if (reg) {
383 		dprintf("%s 0x%08x\n", __func__, reg);
384 
385 		if (reg & DWMMC_CMD_ERR_FLAGS) {
386 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
387 				reg, cmd->opcode);
388 			cmd->error = MMC_ERR_TIMEOUT;
389 		}
390 
391 		if (reg & DWMMC_DATA_ERR_FLAGS) {
392 			dprintf("data err 0x%08x cmd 0x%08x\n",
393 				reg, cmd->opcode);
394 			cmd->error = MMC_ERR_FAILED;
395 			if (!sc->use_pio) {
396 				dma_done(sc, cmd);
397 				dma_stop(sc);
398 			}
399 		}
400 
401 		if (reg & SDMMC_INTMASK_CMD_DONE) {
402 			dwmmc_cmd_done(sc);
403 			sc->cmd_done = 1;
404 		}
405 
406 		if (reg & SDMMC_INTMASK_ACD)
407 			sc->acd_rcvd = 1;
408 
409 		if (reg & SDMMC_INTMASK_DTO)
410 			sc->dto_rcvd = 1;
411 
412 		if (reg & SDMMC_INTMASK_CD) {
413 			dwmmc_handle_card_present(sc,
			    READ4(sc, SDMMC_CDETECT) == 0);
415 		}
416 	}
417 
418 	/* Ack interrupts */
419 	WRITE4(sc, SDMMC_RINTSTS, reg);
420 
421 	if (sc->use_pio) {
422 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
423 			pio_read(sc, cmd);
424 		}
425 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
426 			pio_write(sc, cmd);
427 		}
428 	} else {
429 		/* Now handle DMA interrupts */
430 		reg = READ4(sc, SDMMC_IDSTS);
431 		if (reg) {
432 			dprintf("dma intr 0x%08x\n", reg);
433 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
434 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
435 							 SDMMC_IDINTEN_RI));
436 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
437 				dma_done(sc, cmd);
438 			}
439 		}
440 	}
441 
442 	dwmmc_tasklet(sc);
443 
444 	DWMMC_UNLOCK(sc);
445 }
446 
447 static void
448 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
449 {
450 	bool was_present;
451 
452 	was_present = sc->child != NULL;
453 
454 	if (!was_present && is_present) {
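		/*
		 * Debounce insertion: delay card probing by ~half a second.
		 * The negative ticks value asks taskqueue(9) not to re-arm
		 * a timeout that is already scheduled.
		 */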
455 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
456 		  &sc->card_delayed_task, -(hz / 2));
457 	} else if (was_present && !is_present) {
458 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
459 	}
460 }
461 
462 static void
463 dwmmc_card_task(void *arg, int pending __unused)
464 {
465 	struct dwmmc_softc *sc = arg;
466 
467 #ifdef MMCCAM
468 	mmccam_start_discovery(sc->sim);
469 #else
470 	DWMMC_LOCK(sc);
471 
472 	if (READ4(sc, SDMMC_CDETECT) == 0) {
473 		if (sc->child == NULL) {
474 			if (bootverbose)
475 				device_printf(sc->dev, "Card inserted\n");
476 
477 			sc->child = device_add_child(sc->dev, "mmc", -1);
478 			DWMMC_UNLOCK(sc);
479 			if (sc->child) {
480 				device_set_ivars(sc->child, sc);
481 				(void)device_probe_and_attach(sc->child);
482 			}
483 		} else
484 			DWMMC_UNLOCK(sc);
485 	} else {
486 		/* Card isn't present, detach if necessary */
487 		if (sc->child != NULL) {
488 			if (bootverbose)
489 				device_printf(sc->dev, "Card removed\n");
490 
491 			DWMMC_UNLOCK(sc);
492 			device_delete_child(sc->dev, sc->child);
493 			sc->child = NULL;
494 		} else
495 			DWMMC_UNLOCK(sc);
496 	}
497 #endif /* MMCCAM */
498 }
499 
500 static int
501 parse_fdt(struct dwmmc_softc *sc)
502 {
503 	pcell_t dts_value[3];
504 	phandle_t node;
505 	uint32_t bus_hz = 0;
506 	int len;
507 #ifdef EXT_RESOURCES
508 	int error;
509 #endif
510 
511 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
512 		return (ENXIO);
513 
514 	/* Set some defaults for freq and supported mode */
515 	sc->host.f_min = 400000;
516 	sc->host.f_max = 200000000;
517 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
518 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
519 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
520 
521 	/* fifo-depth */
522 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
523 		OF_getencprop(node, "fifo-depth", dts_value, len);
524 		sc->fifo_depth = dts_value[0];
525 	}
526 
527 	/* num-slots (Deprecated) */
528 	sc->num_slots = 1;
529 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
530 		device_printf(sc->dev, "num-slots property is deprecated\n");
531 		OF_getencprop(node, "num-slots", dts_value, len);
532 		sc->num_slots = dts_value[0];
533 	}
534 
535 	/* clock-frequency */
536 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
537 		OF_getencprop(node, "clock-frequency", dts_value, len);
538 		bus_hz = dts_value[0];
539 	}
540 
541 #ifdef EXT_RESOURCES
542 
543 	/* IP block reset is optional */
544 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
545 	if (error != 0 &&
546 	    error != ENOENT &&
547 	    error != ENODEV) {
548 		device_printf(sc->dev, "Cannot get reset\n");
549 		goto fail;
550 	}
551 
552 	/* vmmc regulator is optional */
553 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
554 	     &sc->vmmc);
555 	if (error != 0 &&
556 	    error != ENOENT &&
557 	    error != ENODEV) {
558 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
559 		goto fail;
560 	}
561 
562 	/* vqmmc regulator is optional */
563 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
564 	     &sc->vqmmc);
565 	if (error != 0 &&
566 	    error != ENOENT &&
567 	    error != ENODEV) {
568 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
569 		goto fail;
570 	}
571 
572 	/* Assert reset first */
573 	if (sc->hwreset != NULL) {
574 		error = hwreset_assert(sc->hwreset);
575 		if (error != 0) {
576 			device_printf(sc->dev, "Cannot assert reset\n");
577 			goto fail;
578 		}
579 	}
580 
581 	/* BIU (Bus Interface Unit clock) is optional */
582 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
583 	if (error != 0 &&
584 	    error != ENOENT &&
585 	    error != ENODEV) {
586 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
587 		goto fail;
588 	}
589 
590 	if (sc->biu) {
591 		error = clk_enable(sc->biu);
592 		if (error != 0) {
593 			device_printf(sc->dev, "cannot enable biu clock\n");
594 			goto fail;
595 		}
596 	}
597 
598 	/*
599 	 * CIU (Controller Interface Unit clock) is mandatory
600 	 * if no clock-frequency property is given
601 	 */
602 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
603 	if (error != 0 &&
604 	    error != ENOENT &&
605 	    error != ENODEV) {
606 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
607 		goto fail;
608 	}
609 
610 	if (sc->ciu) {
611 		if (bus_hz != 0) {
612 			error = clk_set_freq(sc->ciu, bus_hz, 0);
613 			if (error != 0)
614 				device_printf(sc->dev,
615 				    "cannot set ciu clock to %u\n", bus_hz);
616 		}
617 		error = clk_enable(sc->ciu);
618 		if (error != 0) {
619 			device_printf(sc->dev, "cannot enable ciu clock\n");
620 			goto fail;
621 		}
622 		clk_get_freq(sc->ciu, &sc->bus_hz);
623 	}
624 
625 	/* Enable regulators */
626 	if (sc->vmmc != NULL) {
627 		error = regulator_enable(sc->vmmc);
628 		if (error != 0) {
629 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
630 			goto fail;
631 		}
632 	}
633 	if (sc->vqmmc != NULL) {
634 		error = regulator_enable(sc->vqmmc);
635 		if (error != 0) {
636 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
637 			goto fail;
638 		}
639 	}
640 
641 	/* Take dwmmc out of reset */
642 	if (sc->hwreset != NULL) {
643 		error = hwreset_deassert(sc->hwreset);
644 		if (error != 0) {
645 			device_printf(sc->dev, "Cannot deassert reset\n");
646 			goto fail;
647 		}
648 	}
649 #endif /* EXT_RESOURCES */
650 
651 	if (sc->bus_hz == 0) {
652 		device_printf(sc->dev, "No bus speed provided\n");
653 		goto fail;
654 	}
655 
656 	return (0);
657 
658 fail:
659 	return (ENXIO);
660 }
661 
662 int
663 dwmmc_attach(device_t dev)
664 {
665 	struct dwmmc_softc *sc;
666 	int error;
667 	int slot;
668 
669 	sc = device_get_softc(dev);
670 
671 	sc->dev = dev;
672 
	/* Use Auto Stop: it saves hundreds of interrupts per second. */
674 	sc->use_auto_stop = 1;
675 
676 	error = parse_fdt(sc);
677 	if (error != 0) {
678 		device_printf(dev, "Can't get FDT property.\n");
679 		return (ENXIO);
680 	}
681 
682 	DWMMC_LOCK_INIT(sc);
683 
684 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
685 		device_printf(dev, "could not allocate resources\n");
686 		return (ENXIO);
687 	}
688 
689 	/* Setup interrupt handler. */
690 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
691 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
692 	if (error != 0) {
693 		device_printf(dev, "could not setup interrupt handler.\n");
694 		return (ENXIO);
695 	}
696 
697 	device_printf(dev, "Hardware version ID is %04x\n",
698 		READ4(sc, SDMMC_VERID) & 0xffff);
699 
700 	/* XXX: we support operation for slot index 0 only */
701 	slot = 0;
702 	if (sc->pwren_inverted) {
703 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
704 	} else {
705 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
706 	}
707 
708 	/* Reset all */
709 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
710 				  SDMMC_CTRL_FIFO_RESET |
711 				  SDMMC_CTRL_DMA_RESET)))
712 		return (ENXIO);
713 
714 	dwmmc_setup_bus(sc, sc->host.f_min);
715 
716 	if (sc->fifo_depth == 0) {
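		/*
		 * The FIFOTH RX watermark resets to FIFO_DEPTH - 1, so the
		 * FIFO depth can be recovered from it when the DT does not
		 * provide a fifo-depth property.
		 */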
717 		sc->fifo_depth = 1 +
718 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
719 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
720 		    sc->fifo_depth);
721 	}
722 
723 	if (!sc->use_pio) {
724 		dma_stop(sc);
725 		if (dma_setup(sc))
726 			return (ENXIO);
727 
728 		/* Install desc base */
729 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
730 
731 		/* Enable DMA interrupts */
732 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
733 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
734 					   SDMMC_IDINTEN_RI |
735 					   SDMMC_IDINTEN_TI));
736 	}
737 
	/* Clear and disable interrupts for now */
739 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
740 	WRITE4(sc, SDMMC_INTMASK, 0);
741 
742 	/* Maximum timeout */
743 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
744 
745 	/* Enable interrupts */
746 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
747 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
748 				   SDMMC_INTMASK_DTO |
749 				   SDMMC_INTMASK_ACD |
750 				   SDMMC_INTMASK_TXDR |
751 				   SDMMC_INTMASK_RXDR |
752 				   DWMMC_ERR_FLAGS |
753 				   SDMMC_INTMASK_CD));
754 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
755 
756 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
757 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
758 		dwmmc_card_task, sc);
759 
#ifdef MMCCAM
	sc->ccb = NULL;
	if ((sc->devq = cam_simq_alloc(1)) == NULL)
		return (ENXIO);

	mtx_init(&sc->sim_mtx, "dwmmcsim", NULL, MTX_DEF);
	sc->sim = cam_sim_alloc_dev(dwmmc_cam_action, dwmmc_cam_poll,
	    "dw_mmc_sim", sc, dev,
	    &sc->sim_mtx, 1, 1, sc->devq);

	if (sc->sim == NULL) {
		cam_simq_free(sc->devq);
		device_printf(dev, "cannot allocate CAM SIM\n");
		return (ENXIO);
	}

	mtx_lock(&sc->sim_mtx);
	if (xpt_bus_register(sc->sim, sc->dev, 0) != 0) {
		device_printf(sc->dev,
		    "cannot register SCSI pass-through bus\n");
		cam_sim_free(sc->sim, FALSE);
		cam_simq_free(sc->devq);
		mtx_unlock(&sc->sim_mtx);
		return (ENXIO);
	}
	mtx_unlock(&sc->sim_mtx);
#endif
789 	/*
790 	 * Schedule a card detection as we won't get an interrupt
791 	 * if the card is inserted when we attach
792 	 */
793 	dwmmc_card_task(sc, 0);
794 	return (0);
795 }
796 
797 int
798 dwmmc_detach(device_t dev)
799 {
800 	struct dwmmc_softc *sc;
801 	int ret;
802 
803 	sc = device_get_softc(dev);
804 
805 	ret = device_delete_children(dev);
806 	if (ret != 0)
807 		return (ret);
808 
809 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
810 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
811 
812 	if (sc->intr_cookie != NULL) {
813 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
814 		if (ret != 0)
815 			return (ret);
816 	}
817 	bus_release_resources(dev, dwmmc_spec, sc->res);
818 
819 	DWMMC_LOCK_DESTROY(sc);
820 
821 #ifdef EXT_RESOURCES
822 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
823 		device_printf(sc->dev, "cannot deassert reset\n");
824 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
825 		device_printf(sc->dev, "cannot disable biu clock\n");
826 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
		device_printf(sc->dev, "cannot disable ciu clock\n");
828 
829 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
830 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
831 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
832 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
833 #endif
834 
835 	return (0);
836 }
837 
838 static int
839 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
840 {
841 	int tout;
842 	int div;
843 
844 	if (freq == 0) {
845 		WRITE4(sc, SDMMC_CLKENA, 0);
846 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
847 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
848 
849 		tout = 1000;
850 		do {
851 			if (tout-- < 0) {
				device_printf(sc->dev, "Failed to update clk\n");
853 				return (1);
854 			}
855 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
856 
857 		return (0);
858 	}
859 
860 	WRITE4(sc, SDMMC_CLKENA, 0);
861 	WRITE4(sc, SDMMC_CLKSRC, 0);
862 
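	/*
	 * CLKDIV divides the controller clock by 2 * div (0 means bypass),
	 * so round up to keep the card clock at or below the requested
	 * frequency.
	 */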
863 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
864 
865 	WRITE4(sc, SDMMC_CLKDIV, div);
866 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
867 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
868 
869 	tout = 1000;
870 	do {
871 		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
873 			return (1);
874 		}
875 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
876 
877 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
878 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
879 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
880 
881 	tout = 1000;
882 	do {
883 		if (tout-- < 0) {
884 			device_printf(sc->dev, "Failed to enable clk\n");
885 			return (1);
886 		}
887 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
888 
889 	return (0);
890 }
891 
892 static int
893 dwmmc_update_ios(device_t brdev, device_t reqdev)
894 {
895 	struct dwmmc_softc *sc;
896 	struct mmc_ios *ios;
897 	uint32_t reg;
898 	int ret = 0;
899 
900 	sc = device_get_softc(brdev);
901 	ios = &sc->host.ios;
902 
903 	dprintf("Setting up clk %u bus_width %d\n",
904 		ios->clock, ios->bus_width);
905 
906 	if (ios->bus_width == bus_width_8)
907 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
908 	else if (ios->bus_width == bus_width_4)
909 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
910 	else
911 		WRITE4(sc, SDMMC_CTYPE, 0);
912 
913 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take DDR vs. SDR timing into account here */
915 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
916 	}
917 
918 	/* Set DDR mode */
919 	reg = READ4(sc, SDMMC_UHS_REG);
920 	if (ios->timing == bus_timing_uhs_ddr50 ||
921 	    ios->timing == bus_timing_mmc_ddr52 ||
922 	    ios->timing == bus_timing_mmc_hs400)
923 		reg |= (SDMMC_UHS_REG_DDR);
924 	else
925 		reg &= ~(SDMMC_UHS_REG_DDR);
926 	WRITE4(sc, SDMMC_UHS_REG, reg);
927 
928 	if (sc->update_ios)
929 		ret = sc->update_ios(sc, ios);
930 
931 	dwmmc_setup_bus(sc, ios->clock);
932 
933 	return (ret);
934 }
935 
936 static int
937 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
938 {
939 	struct mmc_data *data;
940 
941 	data = cmd->data;
942 
943 	if (data->flags & MMC_DATA_WRITE)
944 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
945 			BUS_DMASYNC_POSTWRITE);
946 	else
947 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
948 			BUS_DMASYNC_POSTREAD);
949 
950 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
951 	    BUS_DMASYNC_POSTWRITE);
952 
953 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
954 
955 	return (0);
956 }
957 
958 static int
959 dma_stop(struct dwmmc_softc *sc)
960 {
961 	int reg;
962 
963 	reg = READ4(sc, SDMMC_CTRL);
964 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
965 	reg |= (SDMMC_CTRL_DMA_RESET);
966 	WRITE4(sc, SDMMC_CTRL, reg);
967 
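	/* Software-reset the IDMAC and clear its enable/fixed-burst bits. */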
968 	reg = READ4(sc, SDMMC_BMOD);
969 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
970 	reg |= (SDMMC_BMOD_SWR);
971 	WRITE4(sc, SDMMC_BMOD, reg);
972 
973 	return (0);
974 }
975 
976 static int
977 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
978 {
979 	struct mmc_data *data;
980 	int err;
981 	int reg;
982 
983 	data = cmd->data;
984 
985 	reg = READ4(sc, SDMMC_INTMASK);
986 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
987 	WRITE4(sc, SDMMC_INTMASK, reg);
988 
989 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
990 		data->data, data->len, dwmmc_ring_setup,
991 		sc, BUS_DMA_NOWAIT);
992 	if (err != 0)
993 		panic("dmamap_load failed\n");
994 
995 	/* Ensure the device can see the desc */
996 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
997 	    BUS_DMASYNC_PREWRITE);
998 
999 	if (data->flags & MMC_DATA_WRITE)
1000 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1001 			BUS_DMASYNC_PREWRITE);
1002 	else
1003 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1004 			BUS_DMASYNC_PREREAD);
1005 
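	/*
	 * Program the FIFO thresholds: DMA burst size (MSIZE), RX watermark
	 * at half the FIFO depth minus one, TX watermark at half the depth.
	 */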
1006 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1007 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1008 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1009 
1010 	WRITE4(sc, SDMMC_FIFOTH, reg);
1011 	wmb();
1012 
1013 	reg = READ4(sc, SDMMC_CTRL);
1014 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1015 	WRITE4(sc, SDMMC_CTRL, reg);
1016 	wmb();
1017 
1018 	reg = READ4(sc, SDMMC_BMOD);
1019 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1020 	WRITE4(sc, SDMMC_BMOD, reg);
1021 
1022 	/* Start */
1023 	WRITE4(sc, SDMMC_PLDMND, 1);
1024 
1025 	return (0);
1026 }
1027 
1028 static int
1029 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1030 {
1031 	struct mmc_data *data;
1032 	int reg;
1033 
1034 	data = cmd->data;
1035 	data->xfer_len = 0;
1036 
1037 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1038 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1039 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1040 
1041 	WRITE4(sc, SDMMC_FIFOTH, reg);
1042 	wmb();
1043 
1044 	return (0);
1045 }
1046 
1047 static void
1048 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1049 {
1050 	struct mmc_data *data;
1051 	uint32_t *p, status;
1052 
1053 	if (cmd == NULL || cmd->data == NULL)
1054 		return;
1055 
1056 	data = cmd->data;
1057 	if ((data->flags & MMC_DATA_READ) == 0)
1058 		return;
1059 
1060 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1061 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1062 
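	/* Drain the FIFO one 32-bit word at a time until it is empty. */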
1063 	while (data->xfer_len < data->len) {
1064 		status = READ4(sc, SDMMC_STATUS);
1065 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1066 			break;
1067 		*p++ = READ4(sc, SDMMC_DATA);
1068 		data->xfer_len += 4;
1069 	}
1070 
1071 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1072 }
1073 
1074 static void
1075 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1076 {
1077 	struct mmc_data *data;
1078 	uint32_t *p, status;
1079 
1080 	if (cmd == NULL || cmd->data == NULL)
1081 		return;
1082 
1083 	data = cmd->data;
1084 	if ((data->flags & MMC_DATA_WRITE) == 0)
1085 		return;
1086 
1087 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1088 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1089 
1090 	while (data->xfer_len < data->len) {
1091 		status = READ4(sc, SDMMC_STATUS);
1092 		if (status & SDMMC_STATUS_FIFO_FULL)
1093 			break;
1094 		WRITE4(sc, SDMMC_DATA, *p++);
1095 		data->xfer_len += 4;
1096 	}
1097 
1098 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1099 }
1100 
1101 static void
1102 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1103 {
1104 	struct mmc_data *data;
1105 	uint32_t blksz;
1106 	uint32_t cmdr;
1107 
1108 	dprintf("%s\n", __func__);
1109 	sc->curcmd = cmd;
1110 	data = cmd->data;
1111 
1112 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
1113 		dwmmc_setup_bus(sc, sc->host.ios.clock);
1114 
1115 #ifndef MMCCAM
1116 	/* XXX Upper layers don't always set this */
1117 	cmd->mrq = sc->req;
1118 #endif
1119 	/* Begin setting up command register. */
1120 
1121 	cmdr = cmd->opcode;
1122 
1123 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1124 
1125 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1126 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1127 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1128 		cmdr |= SDMMC_CMD_STOP_ABORT;
1129 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1130 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1131 
1132 	/* Set up response handling. */
1133 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1134 		cmdr |= SDMMC_CMD_RESP_EXP;
1135 		if (cmd->flags & MMC_RSP_136)
1136 			cmdr |= SDMMC_CMD_RESP_LONG;
1137 	}
1138 
1139 	if (cmd->flags & MMC_RSP_CRC)
1140 		cmdr |= SDMMC_CMD_RESP_CRC;
1141 
1142 	/*
1143 	 * XXX: Not all platforms want this.
1144 	 */
1145 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1146 
1147 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1148 		sc->flags |= (CARD_INIT_DONE);
1149 		cmdr |= SDMMC_CMD_SEND_INIT;
1150 	}
1151 
1152 	if (data) {
1153 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1154 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1155 		     sc->use_auto_stop)
1156 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1157 
1158 		cmdr |= SDMMC_CMD_DATA_EXP;
1159 		if (data->flags & MMC_DATA_STREAM)
1160 			cmdr |= SDMMC_CMD_MODE_STREAM;
1161 		if (data->flags & MMC_DATA_WRITE)
1162 			cmdr |= SDMMC_CMD_DATA_WRITE;
1163 
1164 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1165 		WRITE4(sc, SDMMC_BYTCNT, data->len);
1166 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
1167 			 data->len : MMC_SECTOR_SIZE;
1168 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
1169 
1170 		if (sc->use_pio) {
1171 			pio_prepare(sc, cmd);
1172 		} else {
1173 			dma_prepare(sc, cmd);
1174 		}
1175 		wmb();
1176 	}
1177 
1178 	dprintf("cmdr 0x%08x\n", cmdr);
1179 
1180 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1181 	wmb();
1182 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}
1184 
1185 static void
1186 dwmmc_next_operation(struct dwmmc_softc *sc)
1187 {
1188 	struct mmc_command *cmd;
1189 	dprintf("%s\n", __func__);
1190 #ifdef MMCCAM
1191 	union ccb *ccb;
1192 
1193 	ccb = sc->ccb;
1194 	if (ccb == NULL)
1195 		return;
1196 	cmd = &ccb->mmcio.cmd;
1197 #else
1198 	struct mmc_request *req;
1199 
1200 	req = sc->req;
1201 	if (req == NULL)
1202 		return;
1203 	cmd = req->cmd;
1204 #endif
1205 
1206 	sc->acd_rcvd = 0;
1207 	sc->dto_rcvd = 0;
1208 	sc->cmd_done = 0;
1209 
1210 	/*
	 * XXX: Wait while the card is still busy.  We need this to prevent
	 * data timeouts, mostly caused by a multi-block write command
	 * followed by a single-block read.
1215 	 */
1216 	while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1217 		continue;
1218 
1219 	if (sc->flags & PENDING_CMD) {
1220 		sc->flags &= ~PENDING_CMD;
1221 		dwmmc_start_cmd(sc, cmd);
1222 		return;
1223 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1224 		sc->flags &= ~PENDING_STOP;
		/* XXX: What to do with this? */
		/* dwmmc_start_cmd(sc, req->stop); */
1227 		return;
1228 	}
1229 
1230 #ifdef MMCCAM
1231 	sc->ccb = NULL;
1232 	sc->curcmd = NULL;
1233 	ccb->ccb_h.status =
1234 		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1235 	xpt_done(ccb);
1236 #else
1237 	sc->req = NULL;
1238 	sc->curcmd = NULL;
1239 	req->done(req);
1240 #endif
1241 }
1242 
1243 static int
1244 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1245 {
1246 	struct dwmmc_softc *sc;
1247 
1248 	sc = device_get_softc(brdev);
1249 
1250 	dprintf("%s\n", __func__);
1251 
1252 	DWMMC_LOCK(sc);
1253 
1254 #ifdef MMCCAM
1255 	sc->flags |= PENDING_CMD;
1256 #else
1257 	if (sc->req != NULL) {
1258 		DWMMC_UNLOCK(sc);
1259 		return (EBUSY);
1260 	}
1261 
1262 	sc->req = req;
1263 	sc->flags |= PENDING_CMD;
1264 	if (sc->req->stop)
1265 		sc->flags |= PENDING_STOP;
1266 #endif
1267 	dwmmc_next_operation(sc);
1268 
1269 	DWMMC_UNLOCK(sc);
1270 	return (0);
1271 }
1272 
1273 static int
1274 dwmmc_get_ro(device_t brdev, device_t reqdev)
1275 {
1276 
1277 	dprintf("%s\n", __func__);
1278 
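	/* Write protect detection is not implemented; report writable. */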
1279 	return (0);
1280 }
1281 
1282 static int
1283 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1284 {
1285 	struct dwmmc_softc *sc;
1286 
1287 	sc = device_get_softc(brdev);
1288 
1289 	DWMMC_LOCK(sc);
1290 	while (sc->bus_busy)
1291 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1292 	sc->bus_busy++;
1293 	DWMMC_UNLOCK(sc);
1294 	return (0);
1295 }
1296 
1297 static int
1298 dwmmc_release_host(device_t brdev, device_t reqdev)
1299 {
1300 	struct dwmmc_softc *sc;
1301 
1302 	sc = device_get_softc(brdev);
1303 
1304 	DWMMC_LOCK(sc);
1305 	sc->bus_busy--;
1306 	wakeup(sc);
1307 	DWMMC_UNLOCK(sc);
1308 	return (0);
1309 }
1310 
1311 static int
1312 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1313 {
1314 	struct dwmmc_softc *sc;
1315 
1316 	sc = device_get_softc(bus);
1317 
1318 	switch (which) {
1319 	default:
1320 		return (EINVAL);
1321 	case MMCBR_IVAR_BUS_MODE:
1322 		*(int *)result = sc->host.ios.bus_mode;
1323 		break;
1324 	case MMCBR_IVAR_BUS_WIDTH:
1325 		*(int *)result = sc->host.ios.bus_width;
1326 		break;
1327 	case MMCBR_IVAR_CHIP_SELECT:
1328 		*(int *)result = sc->host.ios.chip_select;
1329 		break;
1330 	case MMCBR_IVAR_CLOCK:
1331 		*(int *)result = sc->host.ios.clock;
1332 		break;
1333 	case MMCBR_IVAR_F_MIN:
1334 		*(int *)result = sc->host.f_min;
1335 		break;
1336 	case MMCBR_IVAR_F_MAX:
1337 		*(int *)result = sc->host.f_max;
1338 		break;
1339 	case MMCBR_IVAR_HOST_OCR:
1340 		*(int *)result = sc->host.host_ocr;
1341 		break;
1342 	case MMCBR_IVAR_MODE:
1343 		*(int *)result = sc->host.mode;
1344 		break;
1345 	case MMCBR_IVAR_OCR:
1346 		*(int *)result = sc->host.ocr;
1347 		break;
1348 	case MMCBR_IVAR_POWER_MODE:
1349 		*(int *)result = sc->host.ios.power_mode;
1350 		break;
1351 	case MMCBR_IVAR_VDD:
1352 		*(int *)result = sc->host.ios.vdd;
1353 		break;
1354 	case MMCBR_IVAR_VCCQ:
1355 		*(int *)result = sc->host.ios.vccq;
1356 		break;
1357 	case MMCBR_IVAR_CAPS:
1358 		*(int *)result = sc->host.caps;
1359 		break;
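	/*
	 * The largest request the IDMAC descriptor ring can map, in
	 * 512-byte sectors.
	 */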
1360 	case MMCBR_IVAR_MAX_DATA:
1361 		*(int *)result = (IDMAC_MAX_SIZE * IDMAC_DESC_SEGS) / MMC_SECTOR_SIZE;
1362 		break;
1363 	case MMCBR_IVAR_TIMING:
1364 		*(int *)result = sc->host.ios.timing;
1365 		break;
1366 	}
1367 	return (0);
1368 }
1369 
1370 static int
1371 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1372 {
1373 	struct dwmmc_softc *sc;
1374 
1375 	sc = device_get_softc(bus);
1376 
1377 	switch (which) {
1378 	default:
1379 		return (EINVAL);
1380 	case MMCBR_IVAR_BUS_MODE:
1381 		sc->host.ios.bus_mode = value;
1382 		break;
1383 	case MMCBR_IVAR_BUS_WIDTH:
1384 		sc->host.ios.bus_width = value;
1385 		break;
1386 	case MMCBR_IVAR_CHIP_SELECT:
1387 		sc->host.ios.chip_select = value;
1388 		break;
1389 	case MMCBR_IVAR_CLOCK:
1390 		sc->host.ios.clock = value;
1391 		break;
1392 	case MMCBR_IVAR_MODE:
1393 		sc->host.mode = value;
1394 		break;
1395 	case MMCBR_IVAR_OCR:
1396 		sc->host.ocr = value;
1397 		break;
1398 	case MMCBR_IVAR_POWER_MODE:
1399 		sc->host.ios.power_mode = value;
1400 		break;
1401 	case MMCBR_IVAR_VDD:
1402 		sc->host.ios.vdd = value;
1403 		break;
1404 	case MMCBR_IVAR_TIMING:
1405 		sc->host.ios.timing = value;
1406 		break;
1407 	case MMCBR_IVAR_VCCQ:
1408 		sc->host.ios.vccq = value;
1409 		break;
1410 	/* These are read-only */
1411 	case MMCBR_IVAR_CAPS:
1412 	case MMCBR_IVAR_HOST_OCR:
1413 	case MMCBR_IVAR_F_MIN:
1414 	case MMCBR_IVAR_F_MAX:
1415 	case MMCBR_IVAR_MAX_DATA:
1416 		return (EINVAL);
1417 	}
1418 	return (0);
1419 }
1420 
1421 /* Note: this function likely belongs to the specific driver impl */
1422 static int
1423 dwmmc_switch_vccq(device_t dev, device_t child)
1424 {
1425 	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
	return (EINVAL);
1427 }
1428 
1429 #ifdef MMCCAM
1430 static void
1431 dwmmc_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb)
1432 {
1433 	struct dwmmc_softc *sc;
1434 
1435 	sc = cam_sim_softc(sim);
1436 
1437 	dwmmc_cam_request(sc, ccb);
1438 }
1439 
1440 static void
1441 dwmmc_cam_action(struct cam_sim *sim, union ccb *ccb)
1442 {
1443 	struct dwmmc_softc *sc;
1444 
1445 	sc = cam_sim_softc(sim);
1446 	if (sc == NULL) {
1447 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1448 		xpt_done(ccb);
1449 		return;
1450 	}
1451 
1452 	mtx_assert(&sc->sim_mtx, MA_OWNED);
1453 
1454 	switch (ccb->ccb_h.func_code) {
1455 	case XPT_PATH_INQ:
1456 		/* XXX: correctly calculate maxio here */
1457 		mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, MMC_SECTOR_SIZE);
1458 		break;
1459 
1460 	case XPT_GET_TRAN_SETTINGS:
1461 	{
1462 		struct ccb_trans_settings *cts = &ccb->cts;
1463 
1464 		if (bootverbose)
1465 			device_printf(sc->dev, "Got XPT_GET_TRAN_SETTINGS\n");
1466 
1467 		cts->protocol = PROTO_MMCSD;
1468 		cts->protocol_version = 1;
1469 		cts->transport = XPORT_MMCSD;
1470 		cts->transport_version = 1;
1471 		cts->xport_specific.valid = 0;
1472 		cts->proto_specific.mmc.host_ocr = sc->host.host_ocr;
1473 		cts->proto_specific.mmc.host_f_min = sc->host.f_min;
1474 		cts->proto_specific.mmc.host_f_max = sc->host.f_max;
1475 		cts->proto_specific.mmc.host_caps = sc->host.caps;
1476 		/* XXX: correctly calculate host_max_data */
1477 		cts->proto_specific.mmc.host_max_data = 1;
1478 		memcpy(&cts->proto_specific.mmc.ios, &sc->host.ios, sizeof(struct mmc_ios));
1479 		ccb->ccb_h.status = CAM_REQ_CMP;
1480 		break;
1481 	}
1482 	case XPT_SET_TRAN_SETTINGS:
1483 	{
1484 		if (bootverbose)
1485 			device_printf(sc->dev, "Got XPT_SET_TRAN_SETTINGS\n");
1486 		dwmmc_cam_settran_settings(sc, ccb);
1487 		ccb->ccb_h.status = CAM_REQ_CMP;
1488 		break;
1489 	}
1490 	case XPT_RESET_BUS: {
1491 		struct ccb_trans_settings_mmc *cts;
1492 
1493 		cts = &ccb->cts.proto_specific.mmc;
1494 		cts->ios_valid = MMC_PM;
1495 		cts->ios.power_mode = power_off;
1496 		/* Power off the MMC bus */
1497 		if (dwmmc_cam_settran_settings(sc, ccb) != 0) {
			device_printf(sc->dev, "cannot power down the MMC bus\n");
1499 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1500 			break;
1501 		}
1502 
1503 		/* Soft Reset controller and run initialization again */
1504 		if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
1505 				  SDMMC_CTRL_FIFO_RESET |
1506 				  SDMMC_CTRL_DMA_RESET)) != 0) {
1507 			device_printf(sc->dev, "cannot reset the controller\n");
1508 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1509 			break;
1510 		}
1511 
1512 		cts->ios_valid = MMC_PM;
1513 		cts->ios.power_mode = power_on;
		/* Power the MMC bus back on */
1515 		if (dwmmc_cam_settran_settings(sc, ccb) != 0) {
1516 			device_printf(sc->dev, "cannot power on the MMC bus\n");
1517 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1518 			break;
1519 		}
1520 
1521 		ccb->ccb_h.status = CAM_REQ_CMP;
1522 		break;
1523 	}
1524 	case XPT_MMC_IO:
1525 		/*
1526 		 * Here is the HW-dependent part of
1527 		 * sending the command to the underlying h/w
1528 		 * At some point in the future an interrupt comes.
1529 		 * Then the request will be marked as completed.
1530 		 */
1531 		ccb->ccb_h.status = CAM_REQ_INPROG;
1532 
1533 		dwmmc_cam_handle_mmcio(sim, ccb);
1534 		return;
1535 		/* NOTREACHED */
1536 		break;
1537 	default:
1538 		ccb->ccb_h.status = CAM_REQ_INVALID;
1539 		break;
1540 	}
1541 	xpt_done(ccb);
1542 	return;
1543 }
1544 
1545 static void
1546 dwmmc_cam_poll(struct cam_sim *sim)
1547 {
1548 	return;
1549 }
1550 
1551 static int
1552 dwmmc_cam_settran_settings(struct dwmmc_softc *sc, union ccb *ccb)
1553 {
1554 	struct mmc_ios *ios;
1555 	struct mmc_ios *new_ios;
1556 	struct ccb_trans_settings_mmc *cts;
1557 	int res;
1558 
1559 	ios = &sc->host.ios;
1560 
1561 	cts = &ccb->cts.proto_specific.mmc;
1562 	new_ios = &cts->ios;
1563 
1564 	/* Update only requested fields */
1565 	if (cts->ios_valid & MMC_CLK) {
1566 		ios->clock = new_ios->clock;
1567 		if (bootverbose)
1568 			device_printf(sc->dev, "Clock => %d\n", ios->clock);
1569 	}
1570 	if (cts->ios_valid & MMC_VDD) {
1571 		ios->vdd = new_ios->vdd;
1572 		if (bootverbose)
1573 			device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1574 	}
1575 	if (cts->ios_valid & MMC_CS) {
1576 		ios->chip_select = new_ios->chip_select;
1577 		if (bootverbose)
1578 			device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1579 	}
1580 	if (cts->ios_valid & MMC_BW) {
1581 		ios->bus_width = new_ios->bus_width;
1582 		if (bootverbose)
1583 			device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1584 	}
1585 	if (cts->ios_valid & MMC_PM) {
1586 		ios->power_mode = new_ios->power_mode;
1587 		if (bootverbose)
1588 			device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1589 	}
1590 	if (cts->ios_valid & MMC_BT) {
1591 		ios->timing = new_ios->timing;
1592 		if (bootverbose)
1593 			device_printf(sc->dev, "Timing => %d\n", ios->timing);
1594 	}
1595 	if (cts->ios_valid & MMC_BM) {
1596 		ios->bus_mode = new_ios->bus_mode;
1597 		if (bootverbose)
1598 			device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1599 	}
1600 	if (cts->ios_valid & MMC_VCCQ) {
1601 		ios->vccq = new_ios->vccq;
1602 		if (bootverbose)
1603 			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1604 		res = dwmmc_switch_vccq(sc->dev, NULL);
1605 		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1606 	}
1607 
1608 	return (dwmmc_update_ios(sc->dev, NULL));
1609 }
1610 
1611 static int
1612 dwmmc_cam_request(struct dwmmc_softc *sc, union ccb *ccb)
1613 {
1614 	struct ccb_mmcio *mmcio;
1615 
1616 	mmcio = &ccb->mmcio;
1617 
1618 	DWMMC_LOCK(sc);
1619 
1620 #ifdef DEBUG
1621 	if (__predict_false(bootverbose)) {
1622 		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1623 			    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1624 			    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1625 			    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1626 	}
1627 #endif
1628 	if (mmcio->cmd.data != NULL) {
1629 		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1630 			panic("data->len = %d, data->flags = %d -- something is b0rked",
1631 			      (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1632 	}
	if (sc->ccb != NULL) {
		device_printf(sc->dev,
		    "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}
1637 	sc->ccb = ccb;
1638 	DWMMC_UNLOCK(sc);
1639 	dwmmc_request(sc->dev, NULL, NULL);
1640 
1641 	return (0);
1642 }
1643 #endif
1644 
1645 static device_method_t dwmmc_methods[] = {
1646 	/* Bus interface */
1647 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1648 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1649 
1650 	/* mmcbr_if */
1651 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1652 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1653 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1654 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1655 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1656 
1657 	DEVMETHOD_END
1658 };
1659 
1660 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1661     sizeof(struct dwmmc_softc));
1662