xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision d38c30c092828f4882ce13b08d0bd3fd6dc7afb5)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/rman.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 
54 #include <dev/fdt/fdt_common.h>
55 #include <dev/ofw/openfirm.h>
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58 
59 #include <machine/bus.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
62 
63 #ifdef EXT_RESOURCES
64 #include <dev/extres/clk/clk.h>
65 #endif
66 
67 #include <dev/mmc/host/dwmmc_reg.h>
68 #include <dev/mmc/host/dwmmc_var.h>
69 
70 #include "mmcbr_if.h"
71 
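/*
 * Debugging is compiled out by default: dprintf() expands to nothing.
 * Point it at printf() locally when tracing the driver.
 */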
72 #define dprintf(x, arg...)
73 
74 #define	READ4(_sc, _reg) \
75 	bus_read_4((_sc)->res[0], _reg)
76 #define	WRITE4(_sc, _reg, _val) \
77 	bus_write_4((_sc)->res[0], _reg, _val)
78 
79 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
80 
81 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
82 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
83 #define	DWMMC_LOCK_INIT(_sc) \
84 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
85 	    "dwmmc", MTX_DEF)
86 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
87 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
88 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
89 
90 #define	PENDING_CMD	0x01
91 #define	PENDING_STOP	0x02
92 #define	CARD_INIT_DONE	0x04
93 
94 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
95 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
96 				|SDMMC_INTMASK_EBE)
97 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
98 				|SDMMC_INTMASK_RE)
99 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
100 				|SDMMC_INTMASK_HLE)
101 
102 #define	DES0_DIC	(1 << 1)
103 #define	DES0_LD		(1 << 2)
104 #define	DES0_FS		(1 << 3)
105 #define	DES0_CH		(1 << 4)
106 #define	DES0_ER		(1 << 5)
107 #define	DES0_CES	(1 << 30)
108 #define	DES0_OWN	(1 << 31)
109 
110 #define	DES1_BS1_MASK	0xfff
111 #define	DES1_BS1_SHIFT	0
112 
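/*
 * Internal DMA controller (IDMAC) descriptor, used in chained mode
 * (DES0_CH): des0 carries the control/status bits above, des1 the buffer
 * size, des2 the buffer physical address and des3 the physical address of
 * the next descriptor in the ring.
 */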
113 struct idmac_desc {
114 	uint32_t	des0;	/* control */
115 	uint32_t	des1;	/* bufsize */
116 	uint32_t	des2;	/* buf1 phys addr */
117 	uint32_t	des3;	/* buf2 phys addr or next descr */
118 };
119 
120 #define	DESC_MAX	256
121 #define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
122 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
123 
124 static void dwmmc_next_operation(struct dwmmc_softc *);
125 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
126 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
127 static int dma_stop(struct dwmmc_softc *);
128 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
129 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
130 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
131 
132 static struct resource_spec dwmmc_spec[] = {
133 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
134 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
135 	{ -1, 0 }
136 };
137 
138 #define	HWTYPE_MASK		(0x0000ffff)
139 #define	HWFLAG_MASK		(0xffff << 16)
140 
141 static void
142 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
143 {
144 
145 	if (error != 0)
146 		return;
147 	*(bus_addr_t *)arg = segs[0].ds_addr;
148 }
149 
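/*
 * bus_dmamap_load() callback: turn the segment list into IDMAC descriptors.
 * Every descriptor is handed to the controller (OWN); the first segment is
 * marked FS and the last one LD, with DIC cleared so the controller signals
 * completion of the whole transfer.
 */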
150 static void
151 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
152 {
153 	struct dwmmc_softc *sc;
154 	int idx;
155 
156 	if (error != 0)
157 		return;
158 
159 	sc = arg;
160 
161 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
162 
163 	for (idx = 0; idx < nsegs; idx++) {
164 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
165 		sc->desc_ring[idx].des1 = segs[idx].ds_len;
166 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
167 
168 		if (idx == 0)
169 			sc->desc_ring[idx].des0 |= DES0_FS;
170 
171 		if (idx == (nsegs - 1)) {
172 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
173 			sc->desc_ring[idx].des0 |= DES0_LD;
174 		}
175 	}
176 }
177 
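/*
 * Set the requested reset bits in CTRL and poll until the hardware clears
 * them again, giving up after roughly 1 ms (100 polls, 10 us apart).
 */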
178 static int
179 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
180 {
181 	int reg;
182 	int i;
183 
184 	reg = READ4(sc, SDMMC_CTRL);
185 	reg |= (reset_bits);
186 	WRITE4(sc, SDMMC_CTRL, reg);
187 
188 	/* Wait for the reset to complete */
189 	for (i = 0; i < 100; i++) {
190 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
191 			return (0);
192 		DELAY(10);
193 	}
194 
195 	device_printf(sc->dev, "Reset failed\n");
196 
197 	return (1);
198 }
199 
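/*
 * Allocate the IDMAC descriptor ring (sc->desc_count chained descriptors,
 * DESC_MAX by default, in 32-bit addressable coherent memory) and create
 * the data buffer DMA tag/map with one MMC_SECTOR_SIZE segment per
 * descriptor.
 */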
200 static int
201 dma_setup(struct dwmmc_softc *sc)
202 {
203 	int error;
204 	int nidx;
205 	int idx;
206 
207 	/*
208 	 * Set up the IDMAC descriptor ring, descriptors, and DMA maps.
209 	 */
210 	error = bus_dma_tag_create(
211 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
212 	    4096, 0,			/* alignment, boundary */
213 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
214 	    BUS_SPACE_MAXADDR,		/* highaddr */
215 	    NULL, NULL,			/* filter, filterarg */
216 	    DESC_SIZE, 1, 		/* maxsize, nsegments */
217 	    DESC_SIZE,			/* maxsegsize */
218 	    0,				/* flags */
219 	    NULL, NULL,			/* lockfunc, lockarg */
220 	    &sc->desc_tag);
221 	if (error != 0) {
222 		device_printf(sc->dev,
223 		    "could not create ring DMA tag.\n");
224 		return (1);
225 	}
226 
227 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
228 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
229 	    &sc->desc_map);
230 	if (error != 0) {
231 		device_printf(sc->dev,
232 		    "could not allocate descriptor ring.\n");
233 		return (1);
234 	}
235 
236 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
237 	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
238 	    &sc->desc_ring_paddr, 0);
239 	if (error != 0) {
240 		device_printf(sc->dev,
241 		    "could not load descriptor ring map.\n");
242 		return (1);
243 	}
244 
245 	for (idx = 0; idx < sc->desc_count; idx++) {
246 		sc->desc_ring[idx].des0 = DES0_CH;
247 		sc->desc_ring[idx].des1 = 0;
248 		nidx = (idx + 1) % sc->desc_count;
249 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
250 		    (nidx * sizeof(struct idmac_desc));
251 	}
252 
253 	error = bus_dma_tag_create(
254 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
255 	    4096, 0,			/* alignment, boundary */
256 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
257 	    BUS_SPACE_MAXADDR,		/* highaddr */
258 	    NULL, NULL,			/* filter, filterarg */
259 	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
260 	    sc->desc_count,		/* nsegments */
261 	    MMC_SECTOR_SIZE,		/* maxsegsize */
262 	    0,				/* flags */
263 	    NULL, NULL,			/* lockfunc, lockarg */
264 	    &sc->buf_tag);
265 	if (error != 0) {
266 		device_printf(sc->dev,
267 		    "could not create data buffer DMA tag.\n");
268 		return (1);
269 	}
270 
271 	error = bus_dmamap_create(sc->buf_tag, 0,
272 	    &sc->buf_map);
273 	if (error != 0) {
274 		device_printf(sc->dev,
275 		    "could not create data buffer DMA map.\n");
276 		return (1);
277 	}
278 
279 	return (0);
280 }
281 
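/*
 * Copy the command response from the RESP registers.  For 136-bit (R2)
 * responses RESP0 holds the least significant word, hence the reversed
 * resp[] indexing.
 */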
282 static void
283 dwmmc_cmd_done(struct dwmmc_softc *sc)
284 {
285 	struct mmc_command *cmd;
286 
287 	cmd = sc->curcmd;
288 	if (cmd == NULL)
289 		return;
290 
291 	if (cmd->flags & MMC_RSP_PRESENT) {
292 		if (cmd->flags & MMC_RSP_136) {
293 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
294 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
295 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
296 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
297 		} else {
298 			cmd->resp[3] = 0;
299 			cmd->resp[2] = 0;
300 			cmd->resp[1] = 0;
301 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
302 		}
303 	}
304 }
305 
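/*
 * Decide whether the current command can complete: the command phase must
 * be done and, for data commands, data-transfer-over (plus auto-command-done
 * when Auto Stop is in use) must also have been seen.
 */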
306 static void
307 dwmmc_tasklet(struct dwmmc_softc *sc)
308 {
309 	struct mmc_command *cmd;
310 
311 	cmd = sc->curcmd;
312 	if (cmd == NULL)
313 		return;
314 
315 	if (!sc->cmd_done)
316 		return;
317 
318 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
319 		dwmmc_next_operation(sc);
320 	} else if (cmd->data && sc->dto_rcvd) {
321 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
322 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
323 		     sc->use_auto_stop) {
324 			if (sc->acd_rcvd)
325 				dwmmc_next_operation(sc);
326 		} else {
327 			dwmmc_next_operation(sc);
328 		}
329 	}
330 }
331 
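/*
 * Interrupt handler.  Controller events arrive through MINTSTS and are
 * acknowledged in RINTSTS; in DMA mode the internal DMA controller reports
 * separately through IDSTS.
 */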
332 static void
333 dwmmc_intr(void *arg)
334 {
335 	struct mmc_command *cmd;
336 	struct dwmmc_softc *sc;
337 	uint32_t reg;
338 
339 	sc = arg;
340 
341 	DWMMC_LOCK(sc);
342 
343 	cmd = sc->curcmd;
344 
345 	/* First handle SDMMC controller interrupts */
346 	reg = READ4(sc, SDMMC_MINTSTS);
347 	if (reg) {
348 		dprintf("%s 0x%08x\n", __func__, reg);
349 
350 		if (reg & DWMMC_CMD_ERR_FLAGS) {
351 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
352 				reg, cmd->opcode);
353 			cmd->error = MMC_ERR_TIMEOUT;
354 		}
355 
356 		if (reg & DWMMC_DATA_ERR_FLAGS) {
357 			dprintf("data err 0x%08x cmd 0x%08x\n",
358 				reg, cmd->opcode);
359 			cmd->error = MMC_ERR_FAILED;
360 			if (!sc->use_pio) {
361 				dma_done(sc, cmd);
362 				dma_stop(sc);
363 			}
364 		}
365 
366 		if (reg & SDMMC_INTMASK_CMD_DONE) {
367 			dwmmc_cmd_done(sc);
368 			sc->cmd_done = 1;
369 		}
370 
371 		if (reg & SDMMC_INTMASK_ACD)
372 			sc->acd_rcvd = 1;
373 
374 		if (reg & SDMMC_INTMASK_DTO)
375 			sc->dto_rcvd = 1;
376 
377 		if (reg & SDMMC_INTMASK_CD) {
378 			dwmmc_handle_card_present(sc,
379 			    READ4(sc, SDMMC_CDETECT) == 0);
380 		}
381 	}
382 
383 	/* Ack interrupts */
384 	WRITE4(sc, SDMMC_RINTSTS, reg);
385 
386 	if (sc->use_pio) {
387 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
388 			pio_read(sc, cmd);
389 		}
390 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
391 			pio_write(sc, cmd);
392 		}
393 	} else {
394 		/* Now handle DMA interrupts */
395 		reg = READ4(sc, SDMMC_IDSTS);
396 		if (reg) {
397 			dprintf("dma intr 0x%08x\n", reg);
398 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
399 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
400 							 SDMMC_IDINTEN_RI));
401 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
402 				dma_done(sc, cmd);
403 			}
404 		}
405 	}
406 
407 	dwmmc_tasklet(sc);
408 
409 	DWMMC_UNLOCK(sc);
410 }
411 
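/*
 * React to a card-detect change: insertion is handled through the delayed
 * task (roughly half a second later), removal through the regular task
 * right away.
 */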
412 static void
413 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
414 {
415 	bool was_present;
416 
417 	was_present = sc->child != NULL;
418 
419 	if (!was_present && is_present) {
420 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
421 		  &sc->card_delayed_task, -(hz / 2));
422 	} else if (was_present && !is_present) {
423 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
424 	}
425 }
426 
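/*
 * Card presence worker: attach an mmc(4) child when CDETECT reports a card
 * and detach it when the card disappears.  The softc lock is dropped around
 * device_probe_and_attach() and device_delete_child().
 */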
427 static void
428 dwmmc_card_task(void *arg, int pending __unused)
429 {
430 	struct dwmmc_softc *sc = arg;
431 
432 	DWMMC_LOCK(sc);
433 
434 	if (READ4(sc, SDMMC_CDETECT) == 0) {
435 		if (sc->child == NULL) {
436 			if (bootverbose)
437 				device_printf(sc->dev, "Card inserted\n");
438 
439 			sc->child = device_add_child(sc->dev, "mmc", -1);
440 			DWMMC_UNLOCK(sc);
441 			if (sc->child) {
442 				device_set_ivars(sc->child, sc);
443 				(void)device_probe_and_attach(sc->child);
444 			}
445 		} else
446 			DWMMC_UNLOCK(sc);
447 
448 	} else {
449 		/* Card isn't present, detach if necessary */
450 		if (sc->child != NULL) {
451 			if (bootverbose)
452 				device_printf(sc->dev, "Card removed\n");
453 
454 			DWMMC_UNLOCK(sc);
455 			device_delete_child(sc->dev, sc->child);
456 			sc->child = NULL;
457 		} else
458 			DWMMC_UNLOCK(sc);
459 	}
460 }
461 
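/*
 * Parse the controller's FDT node: bus-width, max-frequency, fifo-depth,
 * the deprecated num-slots, and clock-frequency, plus (with EXT_RESOURCES)
 * the optional reset line, vmmc/vqmmc regulators and biu/ciu clocks.  The
 * CIU clock rate supplies sc->bus_hz when no clock-frequency is given.
 */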
462 static int
463 parse_fdt(struct dwmmc_softc *sc)
464 {
465 	pcell_t dts_value[3];
466 	phandle_t node;
467 	uint32_t bus_hz = 0, bus_width;
468 	int len;
469 #ifdef EXT_RESOURCES
470 	int error;
471 #endif
472 
473 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
474 		return (ENXIO);
475 
476 	/* bus-width */
477 	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
478 		bus_width = 4;
479 	if (bus_width >= 4)
480 		sc->host.caps |= MMC_CAP_4_BIT_DATA;
481 	if (bus_width >= 8)
482 		sc->host.caps |= MMC_CAP_8_BIT_DATA;
483 
484 	/* max-frequency */
485 	if (OF_getencprop(node, "max-frequency", &sc->host.f_max, sizeof(uint32_t)) <= 0)
486 		sc->host.f_max = 200000000;
487 
488 	/* fifo-depth */
489 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
490 		OF_getencprop(node, "fifo-depth", dts_value, len);
491 		sc->fifo_depth = dts_value[0];
492 	}
493 
494 	/* num-slots (Deprecated) */
495 	sc->num_slots = 1;
496 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
497 		device_printf(sc->dev, "num-slots property is deprecated\n");
498 		OF_getencprop(node, "num-slots", dts_value, len);
499 		sc->num_slots = dts_value[0];
500 	}
501 
502 	/* clock-frequency */
503 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
504 		OF_getencprop(node, "clock-frequency", dts_value, len);
505 		bus_hz = dts_value[0];
506 	}
507 
508 #ifdef EXT_RESOURCES
509 
510 	/* IP block reset is optional */
511 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
512 	if (error != 0 &&
513 	    error != ENOENT &&
514 	    error != ENODEV) {
515 		device_printf(sc->dev, "Cannot get reset\n");
516 		goto fail;
517 	}
518 
519 	/* vmmc regulator is optional */
520 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
521 	     &sc->vmmc);
522 	if (error != 0 &&
523 	    error != ENOENT &&
524 	    error != ENODEV) {
525 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
526 		goto fail;
527 	}
528 
529 	/* vqmmc regulator is optional */
530 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
531 	     &sc->vqmmc);
532 	if (error != 0 &&
533 	    error != ENOENT &&
534 	    error != ENODEV) {
535 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
536 		goto fail;
537 	}
538 
539 	/* Assert reset first */
540 	if (sc->hwreset != NULL) {
541 		error = hwreset_assert(sc->hwreset);
542 		if (error != 0) {
543 			device_printf(sc->dev, "Cannot assert reset\n");
544 			goto fail;
545 		}
546 	}
547 
548 	/* BIU (Bus Interface Unit clock) is optional */
549 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
550 	if (error != 0 &&
551 	    error != ENOENT &&
552 	    error != ENODEV) {
553 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
554 		goto fail;
555 	}
556 
557 	if (sc->biu) {
558 		error = clk_enable(sc->biu);
559 		if (error != 0) {
560 			device_printf(sc->dev, "cannot enable biu clock\n");
561 			goto fail;
562 		}
563 	}
564 
565 	/*
566 	 * CIU (Controller Interface Unit clock) is mandatory
567 	 * if no clock-frequency property is given
568 	 */
569 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
570 	if (error != 0 &&
571 	    error != ENOENT &&
572 	    error != ENODEV) {
573 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
574 		goto fail;
575 	}
576 
577 	if (sc->ciu) {
578 		if (bus_hz != 0) {
579 			error = clk_set_freq(sc->ciu, bus_hz, 0);
580 			if (error != 0)
581 				device_printf(sc->dev,
582 				    "cannot set ciu clock to %u\n", bus_hz);
583 		}
584 		error = clk_enable(sc->ciu);
585 		if (error != 0) {
586 			device_printf(sc->dev, "cannot enable ciu clock\n");
587 			goto fail;
588 		}
589 		clk_get_freq(sc->ciu, &sc->bus_hz);
590 	}
591 
592 	/* Enable regulators */
593 	if (sc->vmmc != NULL) {
594 		error = regulator_enable(sc->vmmc);
595 		if (error != 0) {
596 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
597 			goto fail;
598 		}
599 	}
600 	if (sc->vqmmc != NULL) {
601 		error = regulator_enable(sc->vqmmc);
602 		if (error != 0) {
603 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
604 			goto fail;
605 		}
606 	}
607 
608 	/* Take dwmmc out of reset */
609 	if (sc->hwreset != NULL) {
610 		error = hwreset_deassert(sc->hwreset);
611 		if (error != 0) {
612 			device_printf(sc->dev, "Cannot deassert reset\n");
613 			goto fail;
614 		}
615 	}
616 #endif /* EXT_RESOURCES */
617 
618 	if (sc->bus_hz == 0) {
619 		device_printf(sc->dev, "No bus speed provided\n");
620 		goto fail;
621 	}
622 
623 	return (0);
624 
625 fail:
626 	return (ENXIO);
627 }
628 
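/*
 * Common attach path: parse the FDT node, allocate resources, reset the
 * controller, set up DMA (unless sc->use_pio is set), unmask the interrupts
 * and schedule an initial card detection, since no insertion interrupt will
 * fire for a card that is already present.
 */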
629 int
630 dwmmc_attach(device_t dev)
631 {
632 	struct dwmmc_softc *sc;
633 	int error;
634 	int slot;
635 
636 	sc = device_get_softc(dev);
637 
638 	sc->dev = dev;
639 
640 	/* Use Auto Stop by default; it saves hundreds of interrupts per second. */
641 	sc->use_auto_stop = 1;
642 
643 	error = parse_fdt(sc);
644 	if (error != 0) {
645 		device_printf(dev, "Can't get FDT property.\n");
646 		return (ENXIO);
647 	}
648 
649 	DWMMC_LOCK_INIT(sc);
650 
651 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
652 		device_printf(dev, "could not allocate resources\n");
653 		return (ENXIO);
654 	}
655 
656 	/* Setup interrupt handler. */
657 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
658 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
659 	if (error != 0) {
660 		device_printf(dev, "could not setup interrupt handler.\n");
661 		return (ENXIO);
662 	}
663 
664 	device_printf(dev, "Hardware version ID is %04x\n",
665 		READ4(sc, SDMMC_VERID) & 0xffff);
666 
667 	if (sc->desc_count == 0)
668 		sc->desc_count = DESC_MAX;
669 
670 	/* XXX: only slot index 0 is supported */
671 	slot = 0;
672 	if (sc->pwren_inverted) {
673 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
674 	} else {
675 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
676 	}
677 
678 	/* Reset all */
679 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
680 				  SDMMC_CTRL_FIFO_RESET |
681 				  SDMMC_CTRL_DMA_RESET)))
682 		return (ENXIO);
683 
684 	dwmmc_setup_bus(sc, sc->host.f_min);
685 
686 	if (sc->fifo_depth == 0) {
687 		sc->fifo_depth = 1 +
688 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
689 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
690 		    sc->fifo_depth);
691 	}
692 
693 	if (!sc->use_pio) {
694 		dma_stop(sc);
695 		if (dma_setup(sc))
696 			return (ENXIO);
697 
698 		/* Install the descriptor ring base address */
699 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
700 
701 		/* Enable DMA interrupts */
702 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
703 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
704 					   SDMMC_IDINTEN_RI |
705 					   SDMMC_IDINTEN_TI));
706 	}
707 
708 	/* Clear and disable interrupts until setup is complete */
709 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
710 	WRITE4(sc, SDMMC_INTMASK, 0);
711 
712 	/* Maximum timeout */
713 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
714 
715 	/* Enable interrupts */
716 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
717 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
718 				   SDMMC_INTMASK_DTO |
719 				   SDMMC_INTMASK_ACD |
720 				   SDMMC_INTMASK_TXDR |
721 				   SDMMC_INTMASK_RXDR |
722 				   DWMMC_ERR_FLAGS |
723 				   SDMMC_INTMASK_CD));
724 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
725 
726 	sc->host.f_min = 400000;
727 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
728 	sc->host.caps |= MMC_CAP_HSPEED;
729 	sc->host.caps |= MMC_CAP_SIGNALING_330;
730 
731 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
732 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
733 		dwmmc_card_task, sc);
734 
735 	/*
736 	 * Schedule a card detection pass since we will not get an insertion
737 	 * interrupt for a card that is already present when we attach.
738 	 */
739 	dwmmc_card_task(sc, 0);
740 
741 	return (0);
742 }
743 
744 int
745 dwmmc_detach(device_t dev)
746 {
747 	struct dwmmc_softc *sc;
748 	int ret;
749 
750 	sc = device_get_softc(dev);
751 
752 	ret = device_delete_children(dev);
753 	if (ret != 0)
754 		return (ret);
755 
756 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
757 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
758 
759 	if (sc->intr_cookie != NULL) {
760 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
761 		if (ret != 0)
762 			return (ret);
763 	}
764 	bus_release_resources(dev, dwmmc_spec, sc->res);
765 
766 	DWMMC_LOCK_DESTROY(sc);
767 
768 #ifdef EXT_RESOURCES
769 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
770 		device_printf(sc->dev, "cannot deassert reset\n");
771 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
772 		device_printf(sc->dev, "cannot disable biu clock\n");
773 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
774 		device_printf(sc->dev, "cannot disable ciu clock\n");
775 
776 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
777 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
778 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
779 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
780 #endif
781 
782 	return (0);
783 }
784 
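/*
 * Program the card clock.  The output clock is bus_hz / (2 * CLKDIV), so
 * the divider below is ceil(bus_hz / (2 * freq)) and 0 means bypass.  Each
 * CLKDIV/CLKENA change must be latched by issuing an UPD_CLK_ONLY command
 * and polling until the START bit clears.
 */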
785 static int
786 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
787 {
788 	int tout;
789 	int div;
790 
791 	if (freq == 0) {
792 		WRITE4(sc, SDMMC_CLKENA, 0);
793 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
794 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
795 
796 		tout = 1000;
797 		do {
798 			if (tout-- < 0) {
799 				device_printf(sc->dev, "Failed to update clk\n");
800 				return (1);
801 			}
802 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
803 
804 		return (0);
805 	}
806 
807 	WRITE4(sc, SDMMC_CLKENA, 0);
808 	WRITE4(sc, SDMMC_CLKSRC, 0);
809 
810 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
811 
812 	WRITE4(sc, SDMMC_CLKDIV, div);
813 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
814 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
815 
816 	tout = 1000;
817 	do {
818 		if (tout-- < 0) {
819 			device_printf(sc->dev, "Failed to update clk\n");
820 			return (1);
821 		}
822 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
823 
824 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
825 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
826 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
827 
828 	tout = 1000;
829 	do {
830 		if (tout-- < 0) {
831 			device_printf(sc->dev, "Failed to enable clk\n");
832 			return (1);
833 		}
834 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
835 
836 	return (0);
837 }
838 
839 static int
840 dwmmc_update_ios(device_t brdev, device_t reqdev)
841 {
842 	struct dwmmc_softc *sc;
843 	struct mmc_ios *ios;
844 	uint32_t reg;
845 	int ret = 0;
846 
847 	sc = device_get_softc(brdev);
848 	ios = &sc->host.ios;
849 
850 	dprintf("Setting up clk %u bus_width %d\n",
851 		ios->clock, ios->bus_width);
852 
853 	if (ios->bus_width == bus_width_8)
854 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
855 	else if (ios->bus_width == bus_width_4)
856 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
857 	else
858 		WRITE4(sc, SDMMC_CTYPE, 0);
859 
860 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
861 		/* XXX: take care about DDR or SDR use here */
862 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
863 	}
864 
865 	/* Set DDR mode */
866 	reg = READ4(sc, SDMMC_UHS_REG);
867 	if (ios->timing == bus_timing_uhs_ddr50 ||
868 	    ios->timing == bus_timing_mmc_ddr52 ||
869 	    ios->timing == bus_timing_mmc_hs400)
870 		reg |= (SDMMC_UHS_REG_DDR);
871 	else
872 		reg &= ~(SDMMC_UHS_REG_DDR);
873 	WRITE4(sc, SDMMC_UHS_REG, reg);
874 
875 	if (sc->update_ios)
876 		ret = sc->update_ios(sc, ios);
877 
878 	dwmmc_setup_bus(sc, ios->clock);
879 
880 	return (ret);
881 }
882 
883 static int
884 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
885 {
886 	struct mmc_data *data;
887 
888 	data = cmd->data;
889 
890 	if (data->flags & MMC_DATA_WRITE)
891 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
892 			BUS_DMASYNC_POSTWRITE);
893 	else
894 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
895 			BUS_DMASYNC_POSTREAD);
896 
897 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
898 	    BUS_DMASYNC_POSTWRITE);
899 
900 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
901 
902 	return (0);
903 }
904 
905 static int
906 dma_stop(struct dwmmc_softc *sc)
907 {
908 	int reg;
909 
910 	reg = READ4(sc, SDMMC_CTRL);
911 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
912 	reg |= (SDMMC_CTRL_DMA_RESET);
913 	WRITE4(sc, SDMMC_CTRL, reg);
914 
915 	reg = READ4(sc, SDMMC_BMOD);
916 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
917 	reg |= (SDMMC_BMOD_SWR);
918 	WRITE4(sc, SDMMC_BMOD, reg);
919 
920 	return (0);
921 }
922 
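/*
 * Prepare a DMA transfer: mask the PIO (TXDR/RXDR) interrupts, load the
 * request buffer into the descriptor ring, program the FIFO watermarks,
 * enable the IDMAC and write the poll demand register to start fetching
 * descriptors.
 */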
923 static int
924 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
925 {
926 	struct mmc_data *data;
927 	int err;
928 	int reg;
929 
930 	data = cmd->data;
931 
932 	reg = READ4(sc, SDMMC_INTMASK);
933 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
934 	WRITE4(sc, SDMMC_INTMASK, reg);
935 
936 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
937 		data->data, data->len, dwmmc_ring_setup,
938 		sc, BUS_DMA_NOWAIT);
939 	if (err != 0)
940 		panic("dmamap_load failed\n");
941 
942 	/* Ensure the device can see the desc */
943 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
944 	    BUS_DMASYNC_PREWRITE);
945 
946 	if (data->flags & MMC_DATA_WRITE)
947 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
948 			BUS_DMASYNC_PREWRITE);
949 	else
950 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
951 			BUS_DMASYNC_PREREAD);
952 
953 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
954 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
955 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
956 
957 	WRITE4(sc, SDMMC_FIFOTH, reg);
958 	wmb();
959 
960 	reg = READ4(sc, SDMMC_CTRL);
961 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
962 	WRITE4(sc, SDMMC_CTRL, reg);
963 	wmb();
964 
965 	reg = READ4(sc, SDMMC_BMOD);
966 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
967 	WRITE4(sc, SDMMC_BMOD, reg);
968 
969 	/* Start */
970 	WRITE4(sc, SDMMC_PLDMND, 1);
971 
972 	return (0);
973 }
974 
975 static int
976 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
977 {
978 	struct mmc_data *data;
979 	int reg;
980 
981 	data = cmd->data;
982 	data->xfer_len = 0;
983 
984 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
985 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
986 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
987 
988 	WRITE4(sc, SDMMC_FIFOTH, reg);
989 	wmb();
990 
991 	return (0);
992 }
993 
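/*
 * PIO read path: drain the data FIFO one 32-bit word at a time until it is
 * empty or the transfer is complete, then acknowledge RXDR.  pio_write()
 * below is the mirror image for the transmit direction.
 */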
994 static void
995 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
996 {
997 	struct mmc_data *data;
998 	uint32_t *p, status;
999 
1000 	if (cmd == NULL || cmd->data == NULL)
1001 		return;
1002 
1003 	data = cmd->data;
1004 	if ((data->flags & MMC_DATA_READ) == 0)
1005 		return;
1006 
1007 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1008 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1009 
1010 	while (data->xfer_len < data->len) {
1011 		status = READ4(sc, SDMMC_STATUS);
1012 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1013 			break;
1014 		*p++ = READ4(sc, SDMMC_DATA);
1015 		data->xfer_len += 4;
1016 	}
1017 
1018 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1019 }
1020 
1021 static void
1022 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1023 {
1024 	struct mmc_data *data;
1025 	uint32_t *p, status;
1026 
1027 	if (cmd == NULL || cmd->data == NULL)
1028 		return;
1029 
1030 	data = cmd->data;
1031 	if ((data->flags & MMC_DATA_WRITE) == 0)
1032 		return;
1033 
1034 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1035 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1036 
1037 	while (data->xfer_len < data->len) {
1038 		status = READ4(sc, SDMMC_STATUS);
1039 		if (status & SDMMC_STATUS_FIFO_FULL)
1040 			break;
1041 		WRITE4(sc, SDMMC_DATA, *p++);
1042 		data->xfer_len += 4;
1043 	}
1044 
1045 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1046 }
1047 
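/*
 * Build the CMD register value for the given command (response type, data
 * direction, Auto Stop, card init), program the byte count, block size and
 * DMA/PIO engine for data commands, then write CMDARG and CMD to start.
 */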
1048 static void
1049 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1050 {
1051 	struct mmc_data *data;
1052 	uint32_t blksz;
1053 	uint32_t cmdr;
1054 
1055 	sc->curcmd = cmd;
1056 	data = cmd->data;
1057 
1058 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
1059 		dwmmc_setup_bus(sc, sc->host.ios.clock);
1060 
1061 	/* XXX Upper layers don't always set this */
1062 	cmd->mrq = sc->req;
1063 
1064 	/* Begin setting up command register. */
1065 
1066 	cmdr = cmd->opcode;
1067 
1068 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1069 
1070 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1071 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1072 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1073 		cmdr |= SDMMC_CMD_STOP_ABORT;
1074 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1075 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1076 
1077 	/* Set up response handling. */
1078 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1079 		cmdr |= SDMMC_CMD_RESP_EXP;
1080 		if (cmd->flags & MMC_RSP_136)
1081 			cmdr |= SDMMC_CMD_RESP_LONG;
1082 	}
1083 
1084 	if (cmd->flags & MMC_RSP_CRC)
1085 		cmdr |= SDMMC_CMD_RESP_CRC;
1086 
1087 	/*
1088 	 * XXX: Not all platforms want this.
1089 	 */
1090 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1091 
1092 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1093 		sc->flags |= (CARD_INIT_DONE);
1094 		cmdr |= SDMMC_CMD_SEND_INIT;
1095 	}
1096 
1097 	if (data) {
1098 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1099 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1100 		     sc->use_auto_stop)
1101 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1102 
1103 		cmdr |= SDMMC_CMD_DATA_EXP;
1104 		if (data->flags & MMC_DATA_STREAM)
1105 			cmdr |= SDMMC_CMD_MODE_STREAM;
1106 		if (data->flags & MMC_DATA_WRITE)
1107 			cmdr |= SDMMC_CMD_DATA_WRITE;
1108 
1109 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1110 		WRITE4(sc, SDMMC_BYTCNT, data->len);
1111 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
1112 			 data->len : MMC_SECTOR_SIZE;
1113 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
1114 
1115 		if (sc->use_pio) {
1116 			pio_prepare(sc, cmd);
1117 		} else {
1118 			dma_prepare(sc, cmd);
1119 		}
1120 		wmb();
1121 	}
1122 
1123 	dprintf("cmdr 0x%08x\n", cmdr);
1124 
1125 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1126 	wmb();
1127 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1128 }
1129 
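/*
 * Advance the current request: first the pending command, then (unless
 * Auto Stop already issued it) the pending stop command, and finally
 * complete the request via req->done().
 */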
1130 static void
1131 dwmmc_next_operation(struct dwmmc_softc *sc)
1132 {
1133 	struct mmc_request *req;
1134 
1135 	req = sc->req;
1136 	if (req == NULL)
1137 		return;
1138 
1139 	sc->acd_rcvd = 0;
1140 	sc->dto_rcvd = 0;
1141 	sc->cmd_done = 0;
1142 
1143 	/*
1144 	 * XXX: Wait while the card is still busy.
1145 	 * We need this to prevent data timeouts,
1146 	 * mostly caused by a multi-block write command
1147 	 * followed by a single-block read.
1148 	 */
1149 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1150 		continue;
1151 
1152 	if (sc->flags & PENDING_CMD) {
1153 		sc->flags &= ~PENDING_CMD;
1154 		dwmmc_start_cmd(sc, req->cmd);
1155 		return;
1156 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1157 		sc->flags &= ~PENDING_STOP;
1158 		dwmmc_start_cmd(sc, req->stop);
1159 		return;
1160 	}
1161 
1162 	sc->req = NULL;
1163 	sc->curcmd = NULL;
1164 	req->done(req);
1165 }
1166 
1167 static int
1168 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1169 {
1170 	struct dwmmc_softc *sc;
1171 
1172 	sc = device_get_softc(brdev);
1173 
1174 	dprintf("%s\n", __func__);
1175 
1176 	DWMMC_LOCK(sc);
1177 
1178 	if (sc->req != NULL) {
1179 		DWMMC_UNLOCK(sc);
1180 		return (EBUSY);
1181 	}
1182 
1183 	sc->req = req;
1184 	sc->flags |= PENDING_CMD;
1185 	if (sc->req->stop)
1186 		sc->flags |= PENDING_STOP;
1187 	dwmmc_next_operation(sc);
1188 
1189 	DWMMC_UNLOCK(sc);
1190 	return (0);
1191 }
1192 
1193 static int
1194 dwmmc_get_ro(device_t brdev, device_t reqdev)
1195 {
1196 
1197 	dprintf("%s\n", __func__);
1198 
1199 	return (0);
1200 }
1201 
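/*
 * Bus ownership is a simple counter protected by the softc lock: acquire
 * sleeps until bus_busy drops to zero, release decrements it and wakes the
 * next waiter.
 */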
1202 static int
1203 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1204 {
1205 	struct dwmmc_softc *sc;
1206 
1207 	sc = device_get_softc(brdev);
1208 
1209 	DWMMC_LOCK(sc);
1210 	while (sc->bus_busy)
1211 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1212 	sc->bus_busy++;
1213 	DWMMC_UNLOCK(sc);
1214 	return (0);
1215 }
1216 
1217 static int
1218 dwmmc_release_host(device_t brdev, device_t reqdev)
1219 {
1220 	struct dwmmc_softc *sc;
1221 
1222 	sc = device_get_softc(brdev);
1223 
1224 	DWMMC_LOCK(sc);
1225 	sc->bus_busy--;
1226 	wakeup(sc);
1227 	DWMMC_UNLOCK(sc);
1228 	return (0);
1229 }
1230 
1231 static int
1232 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1233 {
1234 	struct dwmmc_softc *sc;
1235 
1236 	sc = device_get_softc(bus);
1237 
1238 	switch (which) {
1239 	default:
1240 		return (EINVAL);
1241 	case MMCBR_IVAR_BUS_MODE:
1242 		*(int *)result = sc->host.ios.bus_mode;
1243 		break;
1244 	case MMCBR_IVAR_BUS_WIDTH:
1245 		*(int *)result = sc->host.ios.bus_width;
1246 		break;
1247 	case MMCBR_IVAR_CHIP_SELECT:
1248 		*(int *)result = sc->host.ios.chip_select;
1249 		break;
1250 	case MMCBR_IVAR_CLOCK:
1251 		*(int *)result = sc->host.ios.clock;
1252 		break;
1253 	case MMCBR_IVAR_F_MIN:
1254 		*(int *)result = sc->host.f_min;
1255 		break;
1256 	case MMCBR_IVAR_F_MAX:
1257 		*(int *)result = sc->host.f_max;
1258 		break;
1259 	case MMCBR_IVAR_HOST_OCR:
1260 		*(int *)result = sc->host.host_ocr;
1261 		break;
1262 	case MMCBR_IVAR_MODE:
1263 		*(int *)result = sc->host.mode;
1264 		break;
1265 	case MMCBR_IVAR_OCR:
1266 		*(int *)result = sc->host.ocr;
1267 		break;
1268 	case MMCBR_IVAR_POWER_MODE:
1269 		*(int *)result = sc->host.ios.power_mode;
1270 		break;
1271 	case MMCBR_IVAR_VDD:
1272 		*(int *)result = sc->host.ios.vdd;
1273 		break;
1274 	case MMCBR_IVAR_VCCQ:
1275 		*(int *)result = sc->host.ios.vccq;
1276 		break;
1277 	case MMCBR_IVAR_CAPS:
1278 		*(int *)result = sc->host.caps;
1279 		break;
1280 	case MMCBR_IVAR_MAX_DATA:
1281 		*(int *)result = sc->desc_count;
1282 		break;
1283 	case MMCBR_IVAR_TIMING:
1284 		*(int *)result = sc->host.ios.timing;
1285 		break;
1286 	}
1287 	return (0);
1288 }
1289 
1290 static int
1291 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1292 {
1293 	struct dwmmc_softc *sc;
1294 
1295 	sc = device_get_softc(bus);
1296 
1297 	switch (which) {
1298 	default:
1299 		return (EINVAL);
1300 	case MMCBR_IVAR_BUS_MODE:
1301 		sc->host.ios.bus_mode = value;
1302 		break;
1303 	case MMCBR_IVAR_BUS_WIDTH:
1304 		sc->host.ios.bus_width = value;
1305 		break;
1306 	case MMCBR_IVAR_CHIP_SELECT:
1307 		sc->host.ios.chip_select = value;
1308 		break;
1309 	case MMCBR_IVAR_CLOCK:
1310 		sc->host.ios.clock = value;
1311 		break;
1312 	case MMCBR_IVAR_MODE:
1313 		sc->host.mode = value;
1314 		break;
1315 	case MMCBR_IVAR_OCR:
1316 		sc->host.ocr = value;
1317 		break;
1318 	case MMCBR_IVAR_POWER_MODE:
1319 		sc->host.ios.power_mode = value;
1320 		break;
1321 	case MMCBR_IVAR_VDD:
1322 		sc->host.ios.vdd = value;
1323 		break;
1324 	case MMCBR_IVAR_TIMING:
1325 		sc->host.ios.timing = value;
1326 		break;
1327 	case MMCBR_IVAR_VCCQ:
1328 		sc->host.ios.vccq = value;
1329 		break;
1330 	/* These are read-only */
1331 	case MMCBR_IVAR_CAPS:
1332 	case MMCBR_IVAR_HOST_OCR:
1333 	case MMCBR_IVAR_F_MIN:
1334 	case MMCBR_IVAR_F_MAX:
1335 	case MMCBR_IVAR_MAX_DATA:
1336 		return (EINVAL);
1337 	}
1338 	return (0);
1339 }
1340 
1341 static device_method_t dwmmc_methods[] = {
1342 	/* Bus interface */
1343 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1344 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1345 
1346 	/* mmcbr_if */
1347 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1348 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1349 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1350 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1351 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1352 
1353 	DEVMETHOD_END
1354 };
1355 
1356 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1357     sizeof(struct dwmmc_softc));
1358