xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 184c1b943937986c81e1996d999d21626ec7a4ff)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/rman.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/mmc/mmc_fdt_helpers.h>
54 
55 #include <dev/fdt/fdt_common.h>
56 #include <dev/ofw/openfirm.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 
60 #include <machine/bus.h>
61 #include <machine/cpu.h>
62 #include <machine/intr.h>
63 
64 #ifdef EXT_RESOURCES
65 #include <dev/extres/clk/clk.h>
66 #endif
67 
68 #include <dev/mmc/host/dwmmc_reg.h>
69 #include <dev/mmc/host/dwmmc_var.h>
70 
71 #include "opt_mmccam.h"
72 
73 #ifdef MMCCAM
74 #include <cam/cam.h>
75 #include <cam/cam_ccb.h>
76 #include <cam/cam_debug.h>
77 #include <cam/cam_sim.h>
78 #include <cam/cam_xpt_sim.h>
79 #endif
80 
81 #include "mmcbr_if.h"
82 
83 #ifdef DEBUG
84 #define dprintf(fmt, args...) printf(fmt, ##args)
85 #else
86 #define dprintf(fmt, args...)
87 #endif
88 
89 #define	READ4(_sc, _reg) \
90 	bus_read_4((_sc)->res[0], _reg)
91 #define	WRITE4(_sc, _reg, _val) \
92 	bus_write_4((_sc)->res[0], _reg, _val)
93 
94 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
95 
96 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
97 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
98 #define	DWMMC_LOCK_INIT(_sc) \
99 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
100 	    "dwmmc", MTX_DEF)
101 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
102 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
103 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
104 
105 #define	PENDING_CMD	0x01
106 #define	PENDING_STOP	0x02
107 #define	CARD_INIT_DONE	0x04
108 
109 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
110 				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
111 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
112 				|SDMMC_INTMASK_RE)
113 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
114 				|SDMMC_INTMASK_HLE)
115 
116 #define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
117 #define	DES0_LD		(1 << 2)	/* Last Descriptor */
118 #define	DES0_FS		(1 << 3)	/* First Descriptor */
119 #define	DES0_CH		(1 << 4)	/* second address CHained */
120 #define	DES0_ER		(1 << 5)	/* End of Ring */
121 #define	DES0_CES	(1 << 30)	/* Card Error Summary */
122 #define	DES0_OWN	(1 << 31)	/* OWN */
123 
124 #define	DES1_BS1_MASK	0x1fff
125 
126 struct idmac_desc {
127 	uint32_t	des0;	/* control */
128 	uint32_t	des1;	/* bufsize */
129 	uint32_t	des2;	/* buf1 phys addr */
130 	uint32_t	des3;	/* buf2 phys addr or next descr */
131 };
132 
133 #define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
134 #define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
135 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
136 /*
137  * The size field in a DMA descriptor is 13 bits long (up to 4095 bytes),
138  * but must be a multiple of the data bus size.  Additionally, we must
139  * ensure that bus_dmamap_load() doesn't fragment the buffer any further
140  * (because it is processed with page size granularity).  Thus limit the
141  * fragment size to half of a page.
142  * XXX switch the descriptor format to an array and use the second buffer
143  * pointer for the second half of the page
144  */
145 #define	IDMAC_MAX_SIZE	2048
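/*
 * With a 4 KiB PAGE_SIZE (the common case) this yields 256 sixteen-byte
 * descriptors, so the ring occupies exactly one page, and the per-request
 * ceiling is IDMAC_DESC_SEGS * IDMAC_MAX_SIZE = 512 KiB, which is also the
 * basis for the MMCBR_IVAR_MAX_DATA value reported further below.
 */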
146 
147 static void dwmmc_next_operation(struct dwmmc_softc *);
148 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
149 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
150 static int dma_stop(struct dwmmc_softc *);
151 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
152 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
153 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
154 #ifdef MMCCAM
155 static int dwmmc_switch_vccq(device_t, device_t);
156 static void dwmmc_cam_action(struct cam_sim *, union ccb *);
157 static void dwmmc_cam_poll(struct cam_sim *);
158 static int dwmmc_cam_settran_settings(struct dwmmc_softc *, union ccb *);
159 static int dwmmc_cam_request(struct dwmmc_softc *, union ccb *);
160 static void dwmmc_cam_handle_mmcio(struct cam_sim *, union ccb *);
161 #endif
162 
163 static struct resource_spec dwmmc_spec[] = {
164 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
165 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
166 	{ -1, 0 }
167 };
168 
169 #define	HWTYPE_MASK		(0x0000ffff)
170 #define	HWFLAG_MASK		(0xffff << 16)
171 
172 static void
173 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
174 {
175 
176 	if (nsegs != 1)
177 		panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
178 	if (error != 0)
179 		panic("%s: error != 0 (%d)\n", __func__, error);
180 
181 	*(bus_addr_t *)arg = segs[0].ds_addr;
182 }
183 
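/*
 * bus_dmamap_load() callback: convert the segment list of a data buffer
 * into a chain of IDMAC descriptors.  The first segment is marked FS; the
 * last one gets LD and has DIC/CH cleared so that it raises the completion
 * interrupt.  OWN is set only after a write barrier so the controller never
 * sees a partially initialized descriptor.
 */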
184 static void
185 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
186 {
187 	struct dwmmc_softc *sc;
188 	int idx;
189 
190 	sc = arg;
191 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
192 	if (error != 0)
193 		panic("%s: error != 0 (%d)\n", __func__, error);
194 
195 	for (idx = 0; idx < nsegs; idx++) {
196 		sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
197 		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
198 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
199 
200 		if (idx == 0)
201 			sc->desc_ring[idx].des0 |= DES0_FS;
202 
203 		if (idx == (nsegs - 1)) {
204 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
205 			sc->desc_ring[idx].des0 |= DES0_LD;
206 		}
207 		wmb();
208 		sc->desc_ring[idx].des0 |= DES0_OWN;
209 	}
210 }
211 
212 static int
213 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
214 {
215 	int reg;
216 	int i;
217 
218 	reg = READ4(sc, SDMMC_CTRL);
219 	reg |= (reset_bits);
220 	WRITE4(sc, SDMMC_CTRL, reg);
221 
222 	/* Wait for the reset to complete */
223 	for (i = 0; i < 100; i++) {
224 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
225 			return (0);
226 		DELAY(10);
227 	}
228 
229 	device_printf(sc->dev, "Reset failed\n");
230 
231 	return (1);
232 }
233 
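/*
 * Allocate the IDMAC descriptor ring (one page of chained descriptors) and
 * the DMA tag/map used for data buffers.  Each descriptor's des3 points at
 * the next descriptor; the last one points back to the first and carries
 * the End-of-Ring bit.
 */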
234 static int
235 dma_setup(struct dwmmc_softc *sc)
236 {
237 	int error;
238 	int nidx;
239 	int idx;
240 
241 	/*
242 	 * Set up the descriptor ring, descriptors, and DMA maps.
243 	 */
244 	error = bus_dma_tag_create(
245 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
246 	    4096, 0,			/* alignment, boundary */
247 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
248 	    BUS_SPACE_MAXADDR,		/* highaddr */
249 	    NULL, NULL,			/* filter, filterarg */
250 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
251 	    IDMAC_DESC_SIZE,		/* maxsegsize */
252 	    0,				/* flags */
253 	    NULL, NULL,			/* lockfunc, lockarg */
254 	    &sc->desc_tag);
255 	if (error != 0) {
256 		device_printf(sc->dev,
257 		    "could not create ring DMA tag.\n");
258 		return (1);
259 	}
260 
261 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
262 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
263 	    &sc->desc_map);
264 	if (error != 0) {
265 		device_printf(sc->dev,
266 		    "could not allocate descriptor ring.\n");
267 		return (1);
268 	}
269 
270 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
271 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
272 	    &sc->desc_ring_paddr, 0);
273 	if (error != 0) {
274 		device_printf(sc->dev,
275 		    "could not load descriptor ring map.\n");
276 		return (1);
277 	}
278 
279 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
280 		sc->desc_ring[idx].des0 = DES0_CH;
281 		sc->desc_ring[idx].des1 = 0;
282 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
283 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
284 		    (nidx * sizeof(struct idmac_desc));
285 	}
286 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
287 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
288 
289 	error = bus_dma_tag_create(
290 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
291 	    8, 0,			/* alignment, boundary */
292 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
293 	    BUS_SPACE_MAXADDR,		/* highaddr */
294 	    NULL, NULL,			/* filter, filterarg */
295 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
296 	    IDMAC_DESC_SEGS,		/* nsegments */
297 	    IDMAC_MAX_SIZE,		/* maxsegsize */
298 	    0,				/* flags */
299 	    NULL, NULL,			/* lockfunc, lockarg */
300 	    &sc->buf_tag);
301 	if (error != 0) {
302 		device_printf(sc->dev,
303 		    "could not create buffer DMA tag.\n");
304 		return (1);
305 	}
306 
307 	error = bus_dmamap_create(sc->buf_tag, 0,
308 	    &sc->buf_map);
309 	if (error != 0) {
310 		device_printf(sc->dev,
311 		    "could not create buffer DMA map.\n");
312 		return (1);
313 	}
314 
315 	return (0);
316 }
317 
318 static void
319 dwmmc_cmd_done(struct dwmmc_softc *sc)
320 {
321 	struct mmc_command *cmd;
322 #ifdef MMCCAM
323 	union ccb *ccb;
324 #endif
325 
326 #ifdef MMCCAM
327 	ccb = sc->ccb;
328 	if (ccb == NULL)
329 		return;
330 	cmd = &ccb->mmcio.cmd;
331 #else
332 	cmd = sc->curcmd;
333 #endif
334 	if (cmd == NULL)
335 		return;
336 
337 	if (cmd->flags & MMC_RSP_PRESENT) {
338 		if (cmd->flags & MMC_RSP_136) {
339 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
340 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
341 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
342 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
343 		} else {
344 			cmd->resp[3] = 0;
345 			cmd->resp[2] = 0;
346 			cmd->resp[1] = 0;
347 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
348 		}
349 	}
350 }
351 
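/*
 * Advance the request state machine from interrupt context.  Once CMD_DONE
 * has been seen, a command without data (or any command that failed)
 * completes immediately; a data command additionally waits for DTO, and a
 * multi-block transfer using Auto Stop also waits for the ACD (auto command
 * done) interrupt.
 */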
352 static void
353 dwmmc_tasklet(struct dwmmc_softc *sc)
354 {
355 	struct mmc_command *cmd;
356 
357 	cmd = sc->curcmd;
358 	if (cmd == NULL)
359 		return;
360 
361 	if (!sc->cmd_done)
362 		return;
363 
364 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
365 		dwmmc_next_operation(sc);
366 	} else if (cmd->data && sc->dto_rcvd) {
367 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
368 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
369 		     sc->use_auto_stop) {
370 			if (sc->acd_rcvd)
371 				dwmmc_next_operation(sc);
372 		} else {
373 			dwmmc_next_operation(sc);
374 		}
375 	}
376 }
377 
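/*
 * Interrupt handler: process the masked controller status (command and data
 * errors, CMD_DONE, ACD, DTO and card detect), acknowledge it via RINTSTS,
 * then service either the PIO FIFO or the internal DMA status register, and
 * finally let dwmmc_tasklet() decide whether the current request can be
 * completed.
 */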
378 static void
379 dwmmc_intr(void *arg)
380 {
381 	struct mmc_command *cmd;
382 	struct dwmmc_softc *sc;
383 	uint32_t reg;
384 
385 	sc = arg;
386 
387 	DWMMC_LOCK(sc);
388 
389 	cmd = sc->curcmd;
390 
391 	/* First handle SDMMC controller interrupts */
392 	reg = READ4(sc, SDMMC_MINTSTS);
393 	if (reg) {
394 		dprintf("%s 0x%08x\n", __func__, reg);
395 
396 		if (reg & DWMMC_CMD_ERR_FLAGS) {
397 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
398 				reg, cmd->opcode);
399 			cmd->error = MMC_ERR_TIMEOUT;
400 		}
401 
402 		if (reg & DWMMC_DATA_ERR_FLAGS) {
403 			dprintf("data err 0x%08x cmd 0x%08x\n",
404 				reg, cmd->opcode);
405 			cmd->error = MMC_ERR_FAILED;
406 			if (!sc->use_pio) {
407 				dma_done(sc, cmd);
408 				dma_stop(sc);
409 			}
410 		}
411 
412 		if (reg & SDMMC_INTMASK_CMD_DONE) {
413 			dwmmc_cmd_done(sc);
414 			sc->cmd_done = 1;
415 		}
416 
417 		if (reg & SDMMC_INTMASK_ACD)
418 			sc->acd_rcvd = 1;
419 
420 		if (reg & SDMMC_INTMASK_DTO)
421 			sc->dto_rcvd = 1;
422 
423 		if (reg & SDMMC_INTMASK_CD) {
424 			dwmmc_handle_card_present(sc,
425 			    READ4(sc, SDMMC_CDETECT) == 0 ? true : false);
426 		}
427 	}
428 
429 	/* Ack interrupts */
430 	WRITE4(sc, SDMMC_RINTSTS, reg);
431 
432 	if (sc->use_pio) {
433 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
434 			pio_read(sc, cmd);
435 		}
436 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
437 			pio_write(sc, cmd);
438 		}
439 	} else {
440 		/* Now handle DMA interrupts */
441 		reg = READ4(sc, SDMMC_IDSTS);
442 		if (reg) {
443 			dprintf("dma intr 0x%08x\n", reg);
444 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
445 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
446 							 SDMMC_IDINTEN_RI));
447 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
448 				dma_done(sc, cmd);
449 			}
450 		}
451 	}
452 
453 	dwmmc_tasklet(sc);
454 
455 	DWMMC_UNLOCK(sc);
456 }
457 
458 static void
459 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
460 {
461 	bool was_present;
462 
463 	was_present = sc->child != NULL;
464 
465 	if (!was_present && is_present) {
466 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
467 		  &sc->card_delayed_task, -(hz / 2));
468 	} else if (was_present && !is_present) {
469 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
470 	}
471 }
472 
473 static void
474 dwmmc_card_task(void *arg, int pending __unused)
475 {
476 	struct dwmmc_softc *sc = arg;
477 
478 #ifdef MMCCAM
479 	mmccam_start_discovery(sc->sim);
480 #else
481 	DWMMC_LOCK(sc);
482 
483 	if (READ4(sc, SDMMC_CDETECT) == 0) {
484 		if (sc->child == NULL) {
485 			if (bootverbose)
486 				device_printf(sc->dev, "Card inserted\n");
487 
488 			sc->child = device_add_child(sc->dev, "mmc", -1);
489 			DWMMC_UNLOCK(sc);
490 			if (sc->child) {
491 				device_set_ivars(sc->child, sc);
492 				(void)device_probe_and_attach(sc->child);
493 			}
494 		} else
495 			DWMMC_UNLOCK(sc);
496 	} else {
497 		/* Card isn't present, detach if necessary */
498 		if (sc->child != NULL) {
499 			if (bootverbose)
500 				device_printf(sc->dev, "Card removed\n");
501 
502 			DWMMC_UNLOCK(sc);
503 			device_delete_child(sc->dev, sc->child);
504 			sc->child = NULL;
505 		} else
506 			DWMMC_UNLOCK(sc);
507 	}
508 #endif /* MMCCAM */
509 }
510 
511 static int
512 parse_fdt(struct dwmmc_softc *sc)
513 {
514 	pcell_t dts_value[3];
515 	phandle_t node;
516 	uint32_t bus_hz = 0;
517 	int len;
518 #ifdef EXT_RESOURCES
519 	int error;
520 #endif
521 
522 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
523 		return (ENXIO);
524 
525 	/* Set some defaults for freq and supported mode */
526 	sc->host.f_min = 400000;
527 	sc->host.f_max = 200000000;
528 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
529 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
530 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
531 
532 	/* fifo-depth */
533 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
534 		OF_getencprop(node, "fifo-depth", dts_value, len);
535 		sc->fifo_depth = dts_value[0];
536 	}
537 
538 	/* num-slots (Deprecated) */
539 	sc->num_slots = 1;
540 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
541 		device_printf(sc->dev, "num-slots property is deprecated\n");
542 		OF_getencprop(node, "num-slots", dts_value, len);
543 		sc->num_slots = dts_value[0];
544 	}
545 
546 	/* clock-frequency */
547 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
548 		OF_getencprop(node, "clock-frequency", dts_value, len);
549 		bus_hz = dts_value[0];
550 	}
551 
552 #ifdef EXT_RESOURCES
553 
554 	/* IP block reset is optional */
555 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
556 	if (error != 0 &&
557 	    error != ENOENT &&
558 	    error != ENODEV) {
559 		device_printf(sc->dev, "Cannot get reset\n");
560 		goto fail;
561 	}
562 
563 	/* vmmc regulator is optional */
564 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
565 	     &sc->vmmc);
566 	if (error != 0 &&
567 	    error != ENOENT &&
568 	    error != ENODEV) {
569 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
570 		goto fail;
571 	}
572 
573 	/* vqmmc regulator is optional */
574 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
575 	     &sc->vqmmc);
576 	if (error != 0 &&
577 	    error != ENOENT &&
578 	    error != ENODEV) {
579 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
580 		goto fail;
581 	}
582 
583 	/* Assert reset first */
584 	if (sc->hwreset != NULL) {
585 		error = hwreset_assert(sc->hwreset);
586 		if (error != 0) {
587 			device_printf(sc->dev, "Cannot assert reset\n");
588 			goto fail;
589 		}
590 	}
591 
592 	/* BIU (Bus Interface Unit clock) is optional */
593 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
594 	if (error != 0 &&
595 	    error != ENOENT &&
596 	    error != ENODEV) {
597 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
598 		goto fail;
599 	}
600 
601 	if (sc->biu) {
602 		error = clk_enable(sc->biu);
603 		if (error != 0) {
604 			device_printf(sc->dev, "cannot enable biu clock\n");
605 			goto fail;
606 		}
607 	}
608 
609 	/*
610 	 * CIU (Controller Interface Unit clock) is mandatory
611 	 * if no clock-frequency property is given
612 	 */
613 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
614 	if (error != 0 &&
615 	    error != ENOENT &&
616 	    error != ENODEV) {
617 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
618 		goto fail;
619 	}
620 
621 	if (sc->ciu) {
622 		if (bus_hz != 0) {
623 			error = clk_set_freq(sc->ciu, bus_hz, 0);
624 			if (error != 0)
625 				device_printf(sc->dev,
626 				    "cannot set ciu clock to %u\n", bus_hz);
627 		}
628 		error = clk_enable(sc->ciu);
629 		if (error != 0) {
630 			device_printf(sc->dev, "cannot enable ciu clock\n");
631 			goto fail;
632 		}
633 		clk_get_freq(sc->ciu, &sc->bus_hz);
634 	}
635 
636 	/* Enable regulators */
637 	if (sc->vmmc != NULL) {
638 		error = regulator_enable(sc->vmmc);
639 		if (error != 0) {
640 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
641 			goto fail;
642 		}
643 	}
644 	if (sc->vqmmc != NULL) {
645 		error = regulator_enable(sc->vqmmc);
646 		if (error != 0) {
647 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
648 			goto fail;
649 		}
650 	}
651 
652 	/* Take dwmmc out of reset */
653 	if (sc->hwreset != NULL) {
654 		error = hwreset_deassert(sc->hwreset);
655 		if (error != 0) {
656 			device_printf(sc->dev, "Cannot deassert reset\n");
657 			goto fail;
658 		}
659 	}
660 #endif /* EXT_RESOURCES */
661 
662 	if (sc->bus_hz == 0) {
663 		device_printf(sc->dev, "No bus speed provided\n");
664 		goto fail;
665 	}
666 
667 	return (0);
668 
669 fail:
670 	return (ENXIO);
671 }
672 
673 int
674 dwmmc_attach(device_t dev)
675 {
676 	struct dwmmc_softc *sc;
677 	int error;
678 	int slot;
679 
680 	sc = device_get_softc(dev);
681 
682 	sc->dev = dev;
683 
684 	/* Use Auto Stop: it saves hundreds of interrupts per second. */
685 	sc->use_auto_stop = 1;
686 
687 	error = parse_fdt(sc);
688 	if (error != 0) {
689 		device_printf(dev, "Can't get FDT property.\n");
690 		return (ENXIO);
691 	}
692 
693 	DWMMC_LOCK_INIT(sc);
694 
695 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
696 		device_printf(dev, "could not allocate resources\n");
697 		return (ENXIO);
698 	}
699 
700 	/* Setup interrupt handler. */
701 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
702 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
703 	if (error != 0) {
704 		device_printf(dev, "could not setup interrupt handler.\n");
705 		return (ENXIO);
706 	}
707 
708 	device_printf(dev, "Hardware version ID is %04x\n",
709 		READ4(sc, SDMMC_VERID) & 0xffff);
710 
711 	/* XXX: we support operation for slot index 0 only */
712 	slot = 0;
713 	if (sc->pwren_inverted) {
714 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
715 	} else {
716 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
717 	}
718 
719 	/* Reset all */
720 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
721 				  SDMMC_CTRL_FIFO_RESET |
722 				  SDMMC_CTRL_DMA_RESET)))
723 		return (ENXIO);
724 
725 	dwmmc_setup_bus(sc, sc->host.f_min);
726 
727 	if (sc->fifo_depth == 0) {
728 		sc->fifo_depth = 1 +
729 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
730 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
731 		    sc->fifo_depth);
732 	}
733 
734 	if (!sc->use_pio) {
735 		dma_stop(sc);
736 		if (dma_setup(sc))
737 			return (ENXIO);
738 
739 		/* Install desc base */
740 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
741 
742 		/* Enable DMA interrupts */
743 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
744 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
745 					   SDMMC_IDINTEN_RI |
746 					   SDMMC_IDINTEN_TI));
747 	}
748 
749 	/* Clear and disable interrupts for a while */
750 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
751 	WRITE4(sc, SDMMC_INTMASK, 0);
752 
753 	/* Maximum timeout */
754 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
755 
756 	/* Enable interrupts */
757 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
758 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
759 				   SDMMC_INTMASK_DTO |
760 				   SDMMC_INTMASK_ACD |
761 				   SDMMC_INTMASK_TXDR |
762 				   SDMMC_INTMASK_RXDR |
763 				   DWMMC_ERR_FLAGS |
764 				   SDMMC_INTMASK_CD));
765 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
766 
767 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
768 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
769 		dwmmc_card_task, sc);
770 
771 #ifdef MMCCAM
772 	sc->ccb = NULL;
773 	if ((sc->devq = cam_simq_alloc(1)) == NULL) {
774 		goto fail;
775 	}
776 
777 	mtx_init(&sc->sim_mtx, "dwmmcsim", NULL, MTX_DEF);
778 	sc->sim = cam_sim_alloc_dev(dwmmc_cam_action, dwmmc_cam_poll,
779 	    "dw_mmc_sim", sc, dev,
780 	    &sc->sim_mtx, 1, 1, sc->devq);
781 
782 	if (sc->sim == NULL) {
783 		cam_simq_free(sc->devq);
784 		device_printf(dev, "cannot allocate CAM SIM\n");
785 		goto fail;
786 	}
787 
788 	mtx_lock(&sc->sim_mtx);
789 	if (xpt_bus_register(sc->sim, sc->dev, 0) != 0) {
790 		device_printf(sc->dev, "cannot register SCSI pass-through bus\n");
791 		cam_sim_free(sc->sim, FALSE);
792 		cam_simq_free(sc->devq);
793 		mtx_unlock(&sc->sim_mtx);
794 		goto fail;
795 	}
796 
797 fail:
798 	mtx_unlock(&sc->sim_mtx);
799 #endif
800 	/*
801 	 * Schedule a card detection as we won't get an interrupt
802 	 * if the card is inserted when we attach
803 	 */
804 	dwmmc_card_task(sc, 0);
805 	return (0);
806 }
807 
808 int
809 dwmmc_detach(device_t dev)
810 {
811 	struct dwmmc_softc *sc;
812 	int ret;
813 
814 	sc = device_get_softc(dev);
815 
816 	ret = device_delete_children(dev);
817 	if (ret != 0)
818 		return (ret);
819 
820 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
821 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
822 
823 	if (sc->intr_cookie != NULL) {
824 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
825 		if (ret != 0)
826 			return (ret);
827 	}
828 	bus_release_resources(dev, dwmmc_spec, sc->res);
829 
830 	DWMMC_LOCK_DESTROY(sc);
831 
832 #ifdef EXT_RESOURCES
833 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
834 		device_printf(sc->dev, "cannot deassert reset\n");
835 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
836 		device_printf(sc->dev, "cannot disable biu clock\n");
837 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
838 		device_printf(sc->dev, "cannot disable ciu clock\n");
839 
840 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
841 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
842 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
843 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
844 #endif
845 
846 	return (0);
847 }
848 
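/*
 * Program the card clock using the controller's clock-update handshake:
 * disable CLKENA, set CLKDIV, issue a command with UPD_CLK_ONLY | START and
 * poll until the START bit clears, then re-enable the clock (with low-power
 * gating) the same way.  The divider gives clock = bus_hz / (2 * CLKDIV),
 * with CLKDIV = 0 meaning bypass; e.g. for bus_hz = 50 MHz and freq =
 * 400 kHz, DIV_ROUND_UP(50000000, 800000) = 63, so the card clock becomes
 * 50 MHz / 126, roughly 397 kHz.
 */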
849 static int
850 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
851 {
852 	int tout;
853 	int div;
854 
855 	if (freq == 0) {
856 		WRITE4(sc, SDMMC_CLKENA, 0);
857 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
858 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
859 
860 		tout = 1000;
861 		do {
862 			if (tout-- < 0) {
863 				device_printf(sc->dev, "Failed to update clk\n");
864 				return (1);
865 			}
866 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
867 
868 		return (0);
869 	}
870 
871 	WRITE4(sc, SDMMC_CLKENA, 0);
872 	WRITE4(sc, SDMMC_CLKSRC, 0);
873 
874 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
875 
876 	WRITE4(sc, SDMMC_CLKDIV, div);
877 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
878 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
879 
880 	tout = 1000;
881 	do {
882 		if (tout-- < 0) {
883 			device_printf(sc->dev, "Failed to update clk\n");
884 			return (1);
885 		}
886 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
887 
888 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
889 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
890 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
891 
892 	tout = 1000;
893 	do {
894 		if (tout-- < 0) {
895 			device_printf(sc->dev, "Failed to enable clk\n");
896 			return (1);
897 		}
898 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
899 
900 	return (0);
901 }
902 
903 static int
904 dwmmc_update_ios(device_t brdev, device_t reqdev)
905 {
906 	struct dwmmc_softc *sc;
907 	struct mmc_ios *ios;
908 	uint32_t reg;
909 	int ret = 0;
910 
911 	sc = device_get_softc(brdev);
912 	ios = &sc->host.ios;
913 
914 	dprintf("Setting up clk %u, bus_width %d, timing %d\n",
915 		ios->clock, ios->bus_width, ios->timing);
916 
917 	if (ios->bus_width == bus_width_8)
918 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
919 	else if (ios->bus_width == bus_width_4)
920 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
921 	else
922 		WRITE4(sc, SDMMC_CTYPE, 0);
923 
924 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
925 		/* XXX: take care about DDR or SDR use here */
926 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
927 	}
928 
929 	/* Set DDR mode */
930 	reg = READ4(sc, SDMMC_UHS_REG);
931 	if (ios->timing == bus_timing_uhs_ddr50 ||
932 	    ios->timing == bus_timing_mmc_ddr52 ||
933 	    ios->timing == bus_timing_mmc_hs400)
934 		reg |= (SDMMC_UHS_REG_DDR);
935 	else
936 		reg &= ~(SDMMC_UHS_REG_DDR);
937 	WRITE4(sc, SDMMC_UHS_REG, reg);
938 
939 	if (sc->update_ios)
940 		ret = sc->update_ios(sc, ios);
941 
942 	dwmmc_setup_bus(sc, ios->clock);
943 
944 	return (ret);
945 }
946 
947 static int
948 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
949 {
950 	struct mmc_data *data;
951 
952 	data = cmd->data;
953 
954 	if (data->flags & MMC_DATA_WRITE)
955 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
956 			BUS_DMASYNC_POSTWRITE);
957 	else
958 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
959 			BUS_DMASYNC_POSTREAD);
960 
961 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
962 	    BUS_DMASYNC_POSTWRITE);
963 
964 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
965 
966 	return (0);
967 }
968 
969 static int
970 dma_stop(struct dwmmc_softc *sc)
971 {
972 	int reg;
973 
974 	reg = READ4(sc, SDMMC_CTRL);
975 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
976 	reg |= (SDMMC_CTRL_DMA_RESET);
977 	WRITE4(sc, SDMMC_CTRL, reg);
978 
979 	reg = READ4(sc, SDMMC_BMOD);
980 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
981 	reg |= (SDMMC_BMOD_SWR);
982 	WRITE4(sc, SDMMC_BMOD, reg);
983 
984 	return (0);
985 }
986 
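/*
 * Prepare an IDMAC transfer: mask the RXDR/TXDR (FIFO threshold) interrupts,
 * which are only needed for PIO, map the data buffer onto the descriptor
 * ring, program the FIFO watermarks, enable the internal DMA controller in
 * CTRL and BMOD, and write the poll-demand register to start descriptor
 * fetching.
 */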
987 static int
988 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
989 {
990 	struct mmc_data *data;
991 	int err;
992 	int reg;
993 
994 	data = cmd->data;
995 
996 	reg = READ4(sc, SDMMC_INTMASK);
997 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
998 	WRITE4(sc, SDMMC_INTMASK, reg);
999 	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
1000 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
1001 		data->data, data->len, dwmmc_ring_setup,
1002 		sc, BUS_DMA_NOWAIT);
1003 	if (err != 0)
1004 		panic("dmamap_load failed\n");
1005 
1006 	/* Ensure the device can see the desc */
1007 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
1008 	    BUS_DMASYNC_PREWRITE);
1009 
1010 	if (data->flags & MMC_DATA_WRITE)
1011 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1012 			BUS_DMASYNC_PREWRITE);
1013 	else
1014 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
1015 			BUS_DMASYNC_PREREAD);
1016 
1017 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1018 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1019 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1020 
1021 	WRITE4(sc, SDMMC_FIFOTH, reg);
1022 	wmb();
1023 
1024 	reg = READ4(sc, SDMMC_CTRL);
1025 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1026 	WRITE4(sc, SDMMC_CTRL, reg);
1027 	wmb();
1028 
1029 	reg = READ4(sc, SDMMC_BMOD);
1030 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1031 	WRITE4(sc, SDMMC_BMOD, reg);
1032 
1033 	/* Start */
1034 	WRITE4(sc, SDMMC_PLDMND, 1);
1035 
1036 	return (0);
1037 }
1038 
1039 static int
1040 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1041 {
1042 	struct mmc_data *data;
1043 	int reg;
1044 
1045 	data = cmd->data;
1046 	data->xfer_len = 0;
1047 
1048 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1049 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1050 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1051 
1052 	WRITE4(sc, SDMMC_FIFOTH, reg);
1053 	wmb();
1054 
1055 	return (0);
1056 }
1057 
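/*
 * PIO path: move data between the request buffer and the controller FIFO
 * one 32-bit word at a time from interrupt context, advancing xfer_len until
 * the transfer is complete or the FIFO runs empty (read) or full (write),
 * then acknowledge the corresponding FIFO request interrupt.
 */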
1058 static void
1059 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1060 {
1061 	struct mmc_data *data;
1062 	uint32_t *p, status;
1063 
1064 	if (cmd == NULL || cmd->data == NULL)
1065 		return;
1066 
1067 	data = cmd->data;
1068 	if ((data->flags & MMC_DATA_READ) == 0)
1069 		return;
1070 
1071 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1072 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1073 
1074 	while (data->xfer_len < data->len) {
1075 		status = READ4(sc, SDMMC_STATUS);
1076 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1077 			break;
1078 		*p++ = READ4(sc, SDMMC_DATA);
1079 		data->xfer_len += 4;
1080 	}
1081 
1082 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1083 }
1084 
1085 static void
1086 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1087 {
1088 	struct mmc_data *data;
1089 	uint32_t *p, status;
1090 
1091 	if (cmd == NULL || cmd->data == NULL)
1092 		return;
1093 
1094 	data = cmd->data;
1095 	if ((data->flags & MMC_DATA_WRITE) == 0)
1096 		return;
1097 
1098 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1099 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1100 
1101 	while (data->xfer_len < data->len) {
1102 		status = READ4(sc, SDMMC_STATUS);
1103 		if (status & SDMMC_STATUS_FIFO_FULL)
1104 			break;
1105 		WRITE4(sc, SDMMC_DATA, *p++);
1106 		data->xfer_len += 4;
1107 	}
1108 
1109 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1110 }
1111 
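/*
 * Translate an mmc_command into a CMD register value: stop/abort commands
 * are flagged as such, data commands (except MMC_SEND_STATUS) wait for
 * previous data to finish, the response length and CRC checking follow the
 * MMC_RSP flags, SEND_INIT is added once for the very first command, and
 * multi-block transfers get an automatic stop command when use_auto_stop is
 * set.  BYTCNT/BLKSIZ and the DMA or PIO machinery are set up before the
 * command is started.
 */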
1112 static void
1113 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1114 {
1115 	struct mmc_data *data;
1116 	uint32_t blksz;
1117 	uint32_t cmdr;
1118 
1119 	dprintf("%s\n", __func__);
1120 	sc->curcmd = cmd;
1121 	data = cmd->data;
1122 
1123 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
1124 		dwmmc_setup_bus(sc, sc->host.ios.clock);
1125 
1126 #ifndef MMCCAM
1127 	/* XXX Upper layers don't always set this */
1128 	cmd->mrq = sc->req;
1129 #endif
1130 	/* Begin setting up command register. */
1131 
1132 	cmdr = cmd->opcode;
1133 
1134 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1135 
1136 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1137 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1138 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1139 		cmdr |= SDMMC_CMD_STOP_ABORT;
1140 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1141 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1142 
1143 	/* Set up response handling. */
1144 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1145 		cmdr |= SDMMC_CMD_RESP_EXP;
1146 		if (cmd->flags & MMC_RSP_136)
1147 			cmdr |= SDMMC_CMD_RESP_LONG;
1148 	}
1149 
1150 	if (cmd->flags & MMC_RSP_CRC)
1151 		cmdr |= SDMMC_CMD_RESP_CRC;
1152 
1153 	/*
1154 	 * XXX: Not all platforms want this.
1155 	 */
1156 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1157 
1158 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1159 		sc->flags |= (CARD_INIT_DONE);
1160 		cmdr |= SDMMC_CMD_SEND_INIT;
1161 	}
1162 
1163 	if (data) {
1164 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1165 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1166 		     sc->use_auto_stop)
1167 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1168 
1169 		cmdr |= SDMMC_CMD_DATA_EXP;
1170 		if (data->flags & MMC_DATA_STREAM)
1171 			cmdr |= SDMMC_CMD_MODE_STREAM;
1172 		if (data->flags & MMC_DATA_WRITE)
1173 			cmdr |= SDMMC_CMD_DATA_WRITE;
1174 
1175 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1176 		WRITE4(sc, SDMMC_BYTCNT, data->len);
1177 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
1178 			 data->len : MMC_SECTOR_SIZE;
1179 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
1180 
1181 		if (sc->use_pio) {
1182 			pio_prepare(sc, cmd);
1183 		} else {
1184 			dma_prepare(sc, cmd);
1185 		}
1186 		wmb();
1187 	}
1188 
1189 	dprintf("cmdr 0x%08x\n", cmdr);
1190 
1191 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1192 	wmb();
1193 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1194 }
1195 
1196 static void
1197 dwmmc_next_operation(struct dwmmc_softc *sc)
1198 {
1199 	struct mmc_command *cmd;
1200 	dprintf("%s\n", __func__);
1201 #ifdef MMCCAM
1202 	union ccb *ccb;
1203 
1204 	ccb = sc->ccb;
1205 	if (ccb == NULL)
1206 		return;
1207 	cmd = &ccb->mmcio.cmd;
1208 #else
1209 	struct mmc_request *req;
1210 
1211 	req = sc->req;
1212 	if (req == NULL)
1213 		return;
1214 	cmd = req->cmd;
1215 #endif
1216 
1217 	sc->acd_rcvd = 0;
1218 	sc->dto_rcvd = 0;
1219 	sc->cmd_done = 0;
1220 
1221 	/*
1222 	 * XXX: Wait while the card is still busy.
1223 	 * We need this to prevent data timeouts,
1224 	 * mostly caused by a multi-block write command
1225 	 * followed by a single read.
1226 	 */
1227 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1228 		continue;
1229 
1230 	if (sc->flags & PENDING_CMD) {
1231 		sc->flags &= ~PENDING_CMD;
1232 		dwmmc_start_cmd(sc, cmd);
1233 		return;
1234 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1235 		sc->flags &= ~PENDING_STOP;
1236 		/* XXX: What to do with this? */
1237 		/* dwmmc_start_cmd(sc, req->stop); */
1238 		return;
1239 	}
1240 
1241 #ifdef MMCCAM
1242 	sc->ccb = NULL;
1243 	sc->curcmd = NULL;
1244 	ccb->ccb_h.status =
1245 		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1246 	xpt_done(ccb);
1247 #else
1248 	sc->req = NULL;
1249 	sc->curcmd = NULL;
1250 	req->done(req);
1251 #endif
1252 }
1253 
1254 static int
1255 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1256 {
1257 	struct dwmmc_softc *sc;
1258 
1259 	sc = device_get_softc(brdev);
1260 
1261 	dprintf("%s\n", __func__);
1262 
1263 	DWMMC_LOCK(sc);
1264 
1265 #ifdef MMCCAM
1266 	sc->flags |= PENDING_CMD;
1267 #else
1268 	if (sc->req != NULL) {
1269 		DWMMC_UNLOCK(sc);
1270 		return (EBUSY);
1271 	}
1272 
1273 	sc->req = req;
1274 	sc->flags |= PENDING_CMD;
1275 	if (sc->req->stop)
1276 		sc->flags |= PENDING_STOP;
1277 #endif
1278 	dwmmc_next_operation(sc);
1279 
1280 	DWMMC_UNLOCK(sc);
1281 	return (0);
1282 }
1283 
1284 static int
1285 dwmmc_get_ro(device_t brdev, device_t reqdev)
1286 {
1287 
1288 	dprintf("%s\n", __func__);
1289 
1290 	return (0);
1291 }
1292 
1293 static int
1294 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1295 {
1296 	struct dwmmc_softc *sc;
1297 
1298 	sc = device_get_softc(brdev);
1299 
1300 	DWMMC_LOCK(sc);
1301 	while (sc->bus_busy)
1302 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1303 	sc->bus_busy++;
1304 	DWMMC_UNLOCK(sc);
1305 	return (0);
1306 }
1307 
1308 static int
1309 dwmmc_release_host(device_t brdev, device_t reqdev)
1310 {
1311 	struct dwmmc_softc *sc;
1312 
1313 	sc = device_get_softc(brdev);
1314 
1315 	DWMMC_LOCK(sc);
1316 	sc->bus_busy--;
1317 	wakeup(sc);
1318 	DWMMC_UNLOCK(sc);
1319 	return (0);
1320 }
1321 
1322 static int
1323 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1324 {
1325 	struct dwmmc_softc *sc;
1326 
1327 	sc = device_get_softc(bus);
1328 
1329 	switch (which) {
1330 	default:
1331 		return (EINVAL);
1332 	case MMCBR_IVAR_BUS_MODE:
1333 		*(int *)result = sc->host.ios.bus_mode;
1334 		break;
1335 	case MMCBR_IVAR_BUS_WIDTH:
1336 		*(int *)result = sc->host.ios.bus_width;
1337 		break;
1338 	case MMCBR_IVAR_CHIP_SELECT:
1339 		*(int *)result = sc->host.ios.chip_select;
1340 		break;
1341 	case MMCBR_IVAR_CLOCK:
1342 		*(int *)result = sc->host.ios.clock;
1343 		break;
1344 	case MMCBR_IVAR_F_MIN:
1345 		*(int *)result = sc->host.f_min;
1346 		break;
1347 	case MMCBR_IVAR_F_MAX:
1348 		*(int *)result = sc->host.f_max;
1349 		break;
1350 	case MMCBR_IVAR_HOST_OCR:
1351 		*(int *)result = sc->host.host_ocr;
1352 		break;
1353 	case MMCBR_IVAR_MODE:
1354 		*(int *)result = sc->host.mode;
1355 		break;
1356 	case MMCBR_IVAR_OCR:
1357 		*(int *)result = sc->host.ocr;
1358 		break;
1359 	case MMCBR_IVAR_POWER_MODE:
1360 		*(int *)result = sc->host.ios.power_mode;
1361 		break;
1362 	case MMCBR_IVAR_VDD:
1363 		*(int *)result = sc->host.ios.vdd;
1364 		break;
1365 	case MMCBR_IVAR_VCCQ:
1366 		*(int *)result = sc->host.ios.vccq;
1367 		break;
1368 	case MMCBR_IVAR_CAPS:
1369 		*(int *)result = sc->host.caps;
1370 		break;
1371 	case MMCBR_IVAR_MAX_DATA:
1372 		/*
1373 		 * Busdma may bounce buffers, so we must reserve 2 descriptors
1374 		 * (one at the start and one at the end) for bounced fragments.
1375 		 *
1376 		 */
1377 		*(int *)result = (IDMAC_MAX_SIZE * IDMAC_DESC_SEGS) /
1378 		    MMC_SECTOR_SIZE - 3;
1379 		break;
1380 	case MMCBR_IVAR_TIMING:
1381 		*(int *)result = sc->host.ios.timing;
1382 		break;
1383 	}
1384 	return (0);
1385 }
1386 
1387 static int
1388 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1389 {
1390 	struct dwmmc_softc *sc;
1391 
1392 	sc = device_get_softc(bus);
1393 
1394 	switch (which) {
1395 	default:
1396 		return (EINVAL);
1397 	case MMCBR_IVAR_BUS_MODE:
1398 		sc->host.ios.bus_mode = value;
1399 		break;
1400 	case MMCBR_IVAR_BUS_WIDTH:
1401 		sc->host.ios.bus_width = value;
1402 		break;
1403 	case MMCBR_IVAR_CHIP_SELECT:
1404 		sc->host.ios.chip_select = value;
1405 		break;
1406 	case MMCBR_IVAR_CLOCK:
1407 		sc->host.ios.clock = value;
1408 		break;
1409 	case MMCBR_IVAR_MODE:
1410 		sc->host.mode = value;
1411 		break;
1412 	case MMCBR_IVAR_OCR:
1413 		sc->host.ocr = value;
1414 		break;
1415 	case MMCBR_IVAR_POWER_MODE:
1416 		sc->host.ios.power_mode = value;
1417 		break;
1418 	case MMCBR_IVAR_VDD:
1419 		sc->host.ios.vdd = value;
1420 		break;
1421 	case MMCBR_IVAR_TIMING:
1422 		sc->host.ios.timing = value;
1423 		break;
1424 	case MMCBR_IVAR_VCCQ:
1425 		sc->host.ios.vccq = value;
1426 		break;
1427 	/* These are read-only */
1428 	case MMCBR_IVAR_CAPS:
1429 	case MMCBR_IVAR_HOST_OCR:
1430 	case MMCBR_IVAR_F_MIN:
1431 	case MMCBR_IVAR_F_MAX:
1432 	case MMCBR_IVAR_MAX_DATA:
1433 		return (EINVAL);
1434 	}
1435 	return (0);
1436 }
1437 
1438 #ifdef MMCCAM
1439 /* Note: this function likely belongs to the specific driver impl */
1440 static int
1441 dwmmc_switch_vccq(device_t dev, device_t child)
1442 {
1443 	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
1444 	return (EINVAL);
1445 }
1446 
1447 static void
1448 dwmmc_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb)
1449 {
1450 	struct dwmmc_softc *sc;
1451 
1452 	sc = cam_sim_softc(sim);
1453 
1454 	dwmmc_cam_request(sc, ccb);
1455 }
1456 
1457 static void
1458 dwmmc_cam_action(struct cam_sim *sim, union ccb *ccb)
1459 {
1460 	struct dwmmc_softc *sc;
1461 
1462 	sc = cam_sim_softc(sim);
1463 	if (sc == NULL) {
1464 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1465 		xpt_done(ccb);
1466 		return;
1467 	}
1468 
1469 	mtx_assert(&sc->sim_mtx, MA_OWNED);
1470 
1471 	switch (ccb->ccb_h.func_code) {
1472 	case XPT_PATH_INQ:
1473 		/* XXX: correctly calculate maxio here */
1474 		mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, MMC_SECTOR_SIZE);
1475 		break;
1476 
1477 	case XPT_GET_TRAN_SETTINGS:
1478 	{
1479 		struct ccb_trans_settings *cts = &ccb->cts;
1480 
1481 		cts->protocol = PROTO_MMCSD;
1482 		cts->protocol_version = 1;
1483 		cts->transport = XPORT_MMCSD;
1484 		cts->transport_version = 1;
1485 		cts->xport_specific.valid = 0;
1486 		cts->proto_specific.mmc.host_ocr = sc->host.host_ocr;
1487 		cts->proto_specific.mmc.host_f_min = sc->host.f_min;
1488 		cts->proto_specific.mmc.host_f_max = sc->host.f_max;
1489 		cts->proto_specific.mmc.host_caps = sc->host.caps;
1490 		/* XXX: correctly calculate host_max_data */
1491 		cts->proto_specific.mmc.host_max_data = 1;
1492 		memcpy(&cts->proto_specific.mmc.ios, &sc->host.ios, sizeof(struct mmc_ios));
1493 		ccb->ccb_h.status = CAM_REQ_CMP;
1494 		break;
1495 	}
1496 	case XPT_SET_TRAN_SETTINGS:
1497 	{
1498 		dwmmc_cam_settran_settings(sc, ccb);
1499 		ccb->ccb_h.status = CAM_REQ_CMP;
1500 		break;
1501 	}
1502 	case XPT_RESET_BUS: {
1503 		struct ccb_trans_settings_mmc *cts;
1504 
1505 		cts = &ccb->cts.proto_specific.mmc;
1506 		cts->ios_valid = MMC_PM;
1507 		cts->ios.power_mode = power_off;
1508 		/* Power off the MMC bus */
1509 		if (dwmmc_cam_settran_settings(sc, ccb) != 0) {
1510 			device_printf(sc->dev, "cannot power down the MMC bus\n");
1511 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1512 			break;
1513 		}
1514 
1515 		/* Soft Reset controller and run initialization again */
1516 		if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
1517 				  SDMMC_CTRL_FIFO_RESET |
1518 				  SDMMC_CTRL_DMA_RESET)) != 0) {
1519 			device_printf(sc->dev, "cannot reset the controller\n");
1520 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1521 			break;
1522 		}
1523 
1524 		cts->ios_valid = MMC_PM;
1525 		cts->ios.power_mode = power_on;
1526 		/* Power the MMC bus back on */
1527 		if (dwmmc_cam_settran_settings(sc, ccb) != 0) {
1528 			device_printf(sc->dev, "cannot power on the MMC bus\n");
1529 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1530 			break;
1531 		}
1532 
1533 		ccb->ccb_h.status = CAM_REQ_CMP;
1534 		break;
1535 	}
1536 	case XPT_MMC_IO:
1537 		/*
1538 		 * This is the HW-dependent part of sending the command
1539 		 * to the underlying hardware.  At some point in the future
1540 		 * an interrupt will arrive and the request will be marked
1541 		 * as completed.
1542 		 */
1543 		ccb->ccb_h.status = CAM_REQ_INPROG;
1544 
1545 		dwmmc_cam_handle_mmcio(sim, ccb);
1546 		return;
1547 		/* NOTREACHED */
1548 		break;
1549 	default:
1550 		ccb->ccb_h.status = CAM_REQ_INVALID;
1551 		break;
1552 	}
1553 	xpt_done(ccb);
1554 	return;
1555 }
1556 
1557 static void
1558 dwmmc_cam_poll(struct cam_sim *sim)
1559 {
1560 	return;
1561 }
1562 
1563 static int
1564 dwmmc_cam_settran_settings(struct dwmmc_softc *sc, union ccb *ccb)
1565 {
1566 	struct mmc_ios *ios;
1567 	struct mmc_ios *new_ios;
1568 	struct ccb_trans_settings_mmc *cts;
1569 	int res;
1570 
1571 	ios = &sc->host.ios;
1572 
1573 	cts = &ccb->cts.proto_specific.mmc;
1574 	new_ios = &cts->ios;
1575 
1576 	/* Update only requested fields */
1577 	if (cts->ios_valid & MMC_CLK) {
1578 		ios->clock = new_ios->clock;
1579 		if (bootverbose)
1580 			device_printf(sc->dev, "Clock => %d\n", ios->clock);
1581 	}
1582 	if (cts->ios_valid & MMC_VDD) {
1583 		ios->vdd = new_ios->vdd;
1584 		if (bootverbose)
1585 			device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1586 	}
1587 	if (cts->ios_valid & MMC_CS) {
1588 		ios->chip_select = new_ios->chip_select;
1589 		if (bootverbose)
1590 			device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1591 	}
1592 	if (cts->ios_valid & MMC_BW) {
1593 		ios->bus_width = new_ios->bus_width;
1594 		if (bootverbose)
1595 			device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1596 	}
1597 	if (cts->ios_valid & MMC_PM) {
1598 		ios->power_mode = new_ios->power_mode;
1599 		if (bootverbose)
1600 			device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1601 	}
1602 	if (cts->ios_valid & MMC_BT) {
1603 		ios->timing = new_ios->timing;
1604 		if (bootverbose)
1605 			device_printf(sc->dev, "Timing => %d\n", ios->timing);
1606 	}
1607 	if (cts->ios_valid & MMC_BM) {
1608 		ios->bus_mode = new_ios->bus_mode;
1609 		if (bootverbose)
1610 			device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1611 	}
1612 	if (cts->ios_valid & MMC_VCCQ) {
1613 		ios->vccq = new_ios->vccq;
1614 		if (bootverbose)
1615 			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1616 		res = dwmmc_switch_vccq(sc->dev, NULL);
1617 		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1618 	}
1619 
1620 	return (dwmmc_update_ios(sc->dev, NULL));
1621 }
1622 
1623 static int
1624 dwmmc_cam_request(struct dwmmc_softc *sc, union ccb *ccb)
1625 {
1626 	struct ccb_mmcio *mmcio;
1627 
1628 	mmcio = &ccb->mmcio;
1629 
1630 	DWMMC_LOCK(sc);
1631 
1632 #ifdef DEBUG
1633 	if (__predict_false(bootverbose)) {
1634 		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1635 			    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1636 			    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1637 			    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1638 	}
1639 #endif
1640 	if (mmcio->cmd.data != NULL) {
1641 		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1642 			panic("data->len = %d, data->flags = %d -- something is b0rked",
1643 			      (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1644 	}
1645 	if (sc->ccb != NULL) {
1646 		device_printf(sc->dev, "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
1647 		return (EBUSY);
1648 	}
1649 	sc->ccb = ccb;
1650 	DWMMC_UNLOCK(sc);
1651 	dwmmc_request(sc->dev, NULL, NULL);
1652 
1653 	return (0);
1654 }
1655 #endif
1656 
1657 static device_method_t dwmmc_methods[] = {
1658 	/* Bus interface */
1659 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1660 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1661 
1662 	/* mmcbr_if */
1663 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1664 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1665 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1666 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1667 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1668 
1669 	DEVMETHOD_END
1670 };
1671 
1672 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1673     sizeof(struct dwmmc_softc));
1674