xref: /freebsd/sys/arm/broadcom/bcm2835/bcm2835_sdhci.c (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/rman.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44 
45 #include <machine/bus.h>
46 
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49 
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcreg.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53 
54 #include <dev/sdhci/sdhci.h>
55 
56 #include "mmcbr_if.h"
57 #include "sdhci_if.h"
58 
59 #include "opt_mmccam.h"
60 
61 #include "bcm2835_dma.h"
62 #include <arm/broadcom/bcm2835/bcm2835_mbox_prop.h>
63 #ifdef NOTYET
64 #include <arm/broadcom/bcm2835/bcm2835_clkman.h>
65 #endif
66 #include <arm/broadcom/bcm2835/bcm2835_vcbus.h>
67 
68 #define	BCM2835_DEFAULT_SDHCI_FREQ	50
69 #define	BCM2838_DEFAULT_SDHCI_FREQ	100
70 
71 #define	BCM_SDHCI_BUFFER_SIZE		512
72 /*
73  * NUM_DMA_SEGS is the number of DMA segments we want to accommodate on average.
74  * We add in a number of segments based on how much we may need to spill into
75  * another segment due to crossing page boundaries.  e.g. up to PAGE_SIZE, an
76  * extra page is needed as we can cross a page boundary exactly once.
77  */
78 #define	NUM_DMA_SEGS			1
79 #define	NUM_DMA_SPILL_SEGS		\
80 	((((NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE) - 1) / PAGE_SIZE) + 1)
81 #define	ALLOCATED_DMA_SEGS		(NUM_DMA_SEGS +	NUM_DMA_SPILL_SEGS)
82 #define	BCM_DMA_MAXSIZE			(NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE)
83 
84 #define	BCM_SDHCI_SLOT_LEFT(slot)	\
85 	((slot)->curcmd->data->len - (slot)->offset)
86 
87 #define	BCM_SDHCI_SEGSZ_LEFT(slot)	\
88 	min(BCM_DMA_MAXSIZE,		\
89 	    rounddown(BCM_SDHCI_SLOT_LEFT(slot), BCM_SDHCI_BUFFER_SIZE))
90 
91 #define	DATA_PENDING_MASK	(SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)
92 #define	DATA_XFER_MASK		(DATA_PENDING_MASK | SDHCI_INT_DATA_END)
93 
94 #ifdef DEBUG
95 static int bcm2835_sdhci_debug = 0;
96 
97 TUNABLE_INT("hw.bcm2835.sdhci.debug", &bcm2835_sdhci_debug);
98 SYSCTL_INT(_hw_sdhci, OID_AUTO, bcm2835_sdhci_debug, CTLFLAG_RWTUN,
99     &bcm2835_sdhci_debug, 0, "bcm2835 SDHCI debug level");
100 
101 #define	dprintf(fmt, args...)					\
102 	do {							\
103 		if (bcm2835_sdhci_debug)			\
104 			printf("%s: " fmt, __func__, ##args);	\
105 	}  while (0)
106 #else
107 #define dprintf(fmt, args...)
108 #endif
109 
110 static int bcm2835_sdhci_hs = 1;
111 static int bcm2835_sdhci_pio_mode = 0;
112 
/*
 * Per-SoC configuration: the BCM2835 (RPi 1-3) eMMC core and the BCM2838
 * (RPi 4) EMMC2 core differ in clock source, default frequency, controller
 * quirks, and whether DMA writes need DREQ pacing.
 */
struct bcm_mmc_conf {
	int	clock_id;	/* Mailbox clock ID used to query the rate. */
	int	clock_src;	/* Clock manager source; -1 if not used. */
	int	default_freq;	/* Fallback base frequency, in MHz. */
	int	quirks;		/* SDHCI_QUIRK_* flags for sdhci(4). */
	int	emmc_dreq;	/* DREQ line for paced DMA writes. */
};

struct bcm_mmc_conf bcm2835_sdhci_conf = {
	.clock_id	= BCM2835_MBOX_CLOCK_ID_EMMC,
	.clock_src	= -1,
	.default_freq	= BCM2835_DEFAULT_SDHCI_FREQ,
	.quirks		= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
	    SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DONT_SET_HISPD_BIT |
	    SDHCI_QUIRK_MISSING_CAPS,
	.emmc_dreq	= BCM_DMA_DREQ_EMMC,
};

struct bcm_mmc_conf bcm2838_emmc2_conf = {
	.clock_id	= BCM2838_MBOX_CLOCK_ID_EMMC2,
	.clock_src	= -1,
	.default_freq	= BCM2838_DEFAULT_SDHCI_FREQ,
	.quirks		= 0,
	.emmc_dreq	= BCM_DMA_DREQ_NONE,
};

/* FDT compatible strings, each mapped to its bcm_mmc_conf. */
static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-sdhci",	(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-sdhci",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-mmc",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2711-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{"brcm,bcm2838-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{NULL,				0}
};
147 
148 TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs);
149 TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode);
150 
/* Per-device soft state. */
struct bcm_sdhci_softc {
	device_t		sc_dev;
	struct resource *	sc_mem_res;	/* Controller registers. */
	struct resource *	sc_irq_res;	/* SDHCI interrupt. */
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	void *			sc_intrhand;
	struct mmc_request *	sc_req;
	struct sdhci_slot	sc_slot;	/* Single slot on this HC. */
	struct mmc_helper	sc_mmc_helper;	/* FDT regulators/props. */
	int			sc_dma_ch;	/* Allocated bcm283x DMA ch. */
	bus_dma_tag_t		sc_dma_tag;
	bus_dmamap_t		sc_dma_map;
	/* Physical address of the SDHCI data FIFO (DMA target/source). */
	vm_paddr_t		sc_sdhci_buffer_phys;
	/* Segments of the currently loaded DMA map, filled by dmacb. */
	bus_addr_t		dmamap_seg_addrs[ALLOCATED_DMA_SEGS];
	bus_size_t		dmamap_seg_sizes[ALLOCATED_DMA_SEGS];
	int			dmamap_seg_count;
	int			dmamap_seg_index;	/* Next seg to start. */
	int			dmamap_status;	/* Error from bus_dmamap_load. */
	/*
	 * Shadow copies of 32-bit registers that hold two 16-bit values, so
	 * 16-bit accesses can be coalesced (see bcm_sdhci_write_2).
	 */
	uint32_t		blksz_and_count;
	uint32_t		cmd_and_mode;
	bool			need_update_blk;	/* blksz write pending. */
#ifdef NOTYET
	device_t		clkman;
#endif
	struct bcm_mmc_conf *	conf;	/* SoC-specific configuration. */
};
178 
179 static int bcm_sdhci_probe(device_t);
180 static int bcm_sdhci_attach(device_t);
181 static int bcm_sdhci_detach(device_t);
182 static void bcm_sdhci_intr(void *);
183 
184 static int bcm_sdhci_get_ro(device_t, device_t);
185 static void bcm_sdhci_dma_intr(int ch, void *arg);
186 static void bcm_sdhci_start_dma(struct sdhci_slot *slot);
187 
188 static void
189 bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
190 {
191 	struct bcm_sdhci_softc *sc = arg;
192 	int i;
193 
194 	/* Sanity check: we can only ever have one mapping at a time. */
195 	KASSERT(sc->dmamap_seg_count == 0, ("leaked DMA segment"));
196 	sc->dmamap_status = err;
197 	sc->dmamap_seg_count = nseg;
198 
199 	/* Note nseg is guaranteed to be zero if err is non-zero. */
200 	for (i = 0; i < nseg; i++) {
201 		sc->dmamap_seg_addrs[i] = segs[i].ds_addr;
202 		sc->dmamap_seg_sizes[i] = segs[i].ds_len;
203 	}
204 }
205 
206 static int
207 bcm_sdhci_probe(device_t dev)
208 {
209 
210 	if (!ofw_bus_status_okay(dev))
211 		return (ENXIO);
212 
213 	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
214 		return (ENXIO);
215 
216 	device_set_desc(dev, "Broadcom 2708 SDHCI controller");
217 
218 	return (BUS_PROBE_DEFAULT);
219 }
220 
221 static int
222 bcm_sdhci_attach(device_t dev)
223 {
224 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
225 	int rid, err;
226 	phandle_t node;
227 	pcell_t cell;
228 	u_int default_freq;
229 
230 	sc->sc_dev = dev;
231 	sc->sc_req = NULL;
232 
233 	sc->conf = (struct bcm_mmc_conf *)ofw_bus_search_compatible(dev,
234 	    compat_data)->ocd_data;
235 	if (sc->conf == 0)
236 	    return (ENXIO);
237 
238 	err = bcm2835_mbox_set_power_state(BCM2835_MBOX_POWER_ID_EMMC, TRUE);
239 	if (err != 0) {
240 		if (bootverbose)
241 			device_printf(dev, "Unable to enable the power\n");
242 		return (err);
243 	}
244 
245 	default_freq = 0;
246 	err = bcm2835_mbox_get_clock_rate(sc->conf->clock_id, &default_freq);
247 	if (err == 0) {
248 		/* Convert to MHz */
249 		default_freq /= 1000000;
250 	}
251 	if (default_freq == 0) {
252 		node = ofw_bus_get_node(sc->sc_dev);
253 		if ((OF_getencprop(node, "clock-frequency", &cell,
254 		    sizeof(cell))) > 0)
255 			default_freq = cell / 1000000;
256 	}
257 	if (default_freq == 0)
258 		default_freq = sc->conf->default_freq;
259 
260 	if (bootverbose)
261 		device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq);
262 #ifdef NOTYET
263 	if (sc->conf->clock_src > 0) {
264 		uint32_t f;
265 		sc->clkman = devclass_get_device(
266 		    devclass_find("bcm2835_clkman"), 0);
267 		if (sc->clkman == NULL) {
268 			device_printf(dev, "cannot find Clock Manager\n");
269 			return (ENXIO);
270 		}
271 
272 		f = bcm2835_clkman_set_frequency(sc->clkman,
273 		    sc->conf->clock_src, default_freq);
274 		if (f == 0)
275 			return (EINVAL);
276 
277 		if (bootverbose)
278 			device_printf(dev, "Clock source frequency: %dMHz\n",
279 			    f);
280 	}
281 #endif
282 
283 	rid = 0;
284 	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
285 	    RF_ACTIVE);
286 	if (!sc->sc_mem_res) {
287 		device_printf(dev, "cannot allocate memory window\n");
288 		err = ENXIO;
289 		goto fail;
290 	}
291 
292 	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
293 	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);
294 
295 	rid = 0;
296 	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
297 	    RF_ACTIVE | RF_SHAREABLE);
298 	if (!sc->sc_irq_res) {
299 		device_printf(dev, "cannot allocate interrupt\n");
300 		err = ENXIO;
301 		goto fail;
302 	}
303 
304 	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
305 	    NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) {
306 		device_printf(dev, "cannot setup interrupt handler\n");
307 		err = ENXIO;
308 		goto fail;
309 	}
310 
311 	if (!bcm2835_sdhci_pio_mode)
312 		sc->sc_slot.opt = SDHCI_PLATFORM_TRANSFER;
313 
314 	sc->sc_slot.caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180;
315 	if (bcm2835_sdhci_hs)
316 		sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD;
317 	sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT);
318 	sc->sc_slot.quirks = sc->conf->quirks;
319 
320 	sdhci_init_slot(dev, &sc->sc_slot, 0);
321 	mmc_fdt_parse(dev, 0, &sc->sc_mmc_helper, &sc->sc_slot.host);
322 
323 	sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_ANY);
324 	if (sc->sc_dma_ch == BCM_DMA_CH_INVALID)
325 		goto fail;
326 
327 	err = bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc);
328 	if (err != 0) {
329 		device_printf(dev,
330 		    "cannot setup dma interrupt handler\n");
331 		err = ENXIO;
332 		goto fail;
333 	}
334 
335 	/* Allocate bus_dma resources. */
336 	err = bus_dma_tag_create(bus_get_dma_tag(dev),
337 	    1, 0, bcm283x_dmabus_peripheral_lowaddr(),
338 	    BUS_SPACE_MAXADDR, NULL, NULL,
339 	    BCM_DMA_MAXSIZE, ALLOCATED_DMA_SEGS, BCM_SDHCI_BUFFER_SIZE,
340 	    BUS_DMA_ALLOCNOW, NULL, NULL,
341 	    &sc->sc_dma_tag);
342 
343 	if (err) {
344 		device_printf(dev, "failed allocate DMA tag");
345 		goto fail;
346 	}
347 
348 	err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map);
349 	if (err) {
350 		device_printf(dev, "bus_dmamap_create failed\n");
351 		goto fail;
352 	}
353 
354 	/* FIXME: Fix along with other BUS_SPACE_PHYSADDR instances */
355 	sc->sc_sdhci_buffer_phys = rman_get_start(sc->sc_mem_res) +
356 	    SDHCI_BUFFER;
357 
358 	bus_generic_probe(dev);
359 	bus_generic_attach(dev);
360 
361 	sdhci_start_slot(&sc->sc_slot);
362 
363 	/* Seed our copies. */
364 	sc->blksz_and_count = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_BLOCK_SIZE);
365 	sc->cmd_and_mode = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_TRANSFER_MODE);
366 
367 	return (0);
368 
369 fail:
370 	if (sc->sc_intrhand)
371 		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
372 	if (sc->sc_irq_res)
373 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
374 	if (sc->sc_mem_res)
375 		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
376 
377 	return (err);
378 }
379 
/*
 * Detach is not supported: the slot, DMA channel, and interrupt wiring are
 * never torn down, so always refuse with EBUSY to keep the driver loaded.
 */
static int
bcm_sdhci_detach(device_t dev)
{

	return (EBUSY);
}
386 
387 static void
388 bcm_sdhci_intr(void *arg)
389 {
390 	struct bcm_sdhci_softc *sc = arg;
391 
392 	sdhci_generic_intr(&sc->sc_slot);
393 }
394 
395 static int
396 bcm_sdhci_update_ios(device_t bus, device_t child)
397 {
398 #ifdef EXT_RESOURCES
399 	struct bcm_sdhci_softc *sc;
400 	struct mmc_ios *ios;
401 #endif
402 	int rv;
403 
404 #ifdef EXT_RESOURCES
405 	sc = device_get_softc(bus);
406 	ios = &sc->sc_slot.host.ios;
407 
408 	if (ios->power_mode == power_up) {
409 		if (sc->sc_mmc_helper.vmmc_supply)
410 			regulator_enable(sc->sc_mmc_helper.vmmc_supply);
411 		if (sc->sc_mmc_helper.vqmmc_supply)
412 			regulator_enable(sc->sc_mmc_helper.vqmmc_supply);
413 	}
414 #endif
415 
416 	rv = sdhci_generic_update_ios(bus, child);
417 	if (rv != 0)
418 		return (rv);
419 
420 #ifdef EXT_RESOURCES
421 	if (ios->power_mode == power_off) {
422 		if (sc->sc_mmc_helper.vmmc_supply)
423 			regulator_disable(sc->sc_mmc_helper.vmmc_supply);
424 		if (sc->sc_mmc_helper.vqmmc_supply)
425 			regulator_disable(sc->sc_mmc_helper.vqmmc_supply);
426 	}
427 #endif
428 
429 	return (0);
430 }
431 
/*
 * mmcbr get_ro: the controller has no write-protect sensing, so the card is
 * always reported writable.
 */
static int
bcm_sdhci_get_ro(device_t bus, device_t child)
{

	return (0);
}
438 
439 static inline uint32_t
440 RD4(struct bcm_sdhci_softc *sc, bus_size_t off)
441 {
442 	uint32_t val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
443 	return val;
444 }
445 
446 static inline void
447 WR4(struct bcm_sdhci_softc *sc, bus_size_t off, uint32_t val)
448 {
449 
450 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val);
451 	/*
452 	 * The Arasan HC has a bug where it may lose the content of
453 	 * consecutive writes to registers that are within two SD-card
454 	 * clock cycles of each other (a clock domain crossing problem).
455 	 */
456 	if (sc->sc_slot.clock > 0)
457 		DELAY(((2 * 1000000) / sc->sc_slot.clock) + 1);
458 }
459 
460 static uint8_t
461 bcm_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
462 {
463 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
464 	uint32_t val = RD4(sc, off & ~3);
465 
466 	return ((val >> (off & 3)*8) & 0xff);
467 }
468 
469 static uint16_t
470 bcm_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off)
471 {
472 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
473 	uint32_t val32;
474 
475 	/*
476 	 * Standard 32-bit handling of command and transfer mode, as
477 	 * well as block size and count.
478 	 */
479 	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
480 	    sc->need_update_blk)
481 		val32 = sc->blksz_and_count;
482 	else if (off == SDHCI_TRANSFER_MODE || off == SDHCI_COMMAND_FLAGS)
483 		val32 = sc->cmd_and_mode;
484 	else
485 		val32 = RD4(sc, off & ~3);
486 
487 	return ((val32 >> (off & 3)*8) & 0xffff);
488 }
489 
490 static uint32_t
491 bcm_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
492 {
493 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
494 
495 	return RD4(sc, off);
496 }
497 
/* Burst-read 'count' 32-bit words from the data port at 'off' (PIO path). */
static void
bcm_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}
506 
507 static void
508 bcm_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off,
509     uint8_t val)
510 {
511 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
512 	uint32_t val32 = RD4(sc, off & ~3);
513 	val32 &= ~(0xff << (off & 3)*8);
514 	val32 |= (val << (off & 3)*8);
515 	WR4(sc, off & ~3, val32);
516 }
517 
/*
 * 16-bit register write with write coalescing, needed because the hardware
 * only supports 32-bit accesses and several 16-bit SDHCI registers share a
 * word.  Block size/count and transfer mode are latched in shadow copies
 * and flushed to hardware only when the command register is written.
 */
static void
bcm_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint16_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32;

	/*
	 * If we have a queued up 16bit value for blk size or count, use and
	 * update the saved value rather than doing any real register access.
	 * If we did not touch either since the last write, then read from
	 * register as at least block count can change.
	 * Similarly, if we are about to issue a command, always use the saved
	 * value for transfer mode as we can never write that without issuing
	 * a command.
	 */
	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
	    sc->need_update_blk)
		val32 = sc->blksz_and_count;
	else if (off == SDHCI_TRANSFER_MODE || off == SDHCI_COMMAND_FLAGS)
		val32 = sc->cmd_and_mode;
	else
		val32 = RD4(sc, off & ~3);

	/* Merge the new 16-bit value into its half of the 32-bit word. */
	val32 &= ~(0xffff << (off & 3)*8);
	val32 |= (val << (off & 3)*8);

	if (off == SDHCI_TRANSFER_MODE)
		/* Deferred: flushed together with the command write below. */
		sc->cmd_and_mode = val32;
	else if (off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) {
		/* Deferred: flushed just before the next command issues. */
		sc->blksz_and_count = val32;
		sc->need_update_blk = true;
	} else {
		if (off == SDHCI_COMMAND_FLAGS) {
			/* If we saved blk writes, do them now before cmd. */
			if (sc->need_update_blk) {
				WR4(sc, SDHCI_BLOCK_SIZE, sc->blksz_and_count);
				sc->need_update_blk = false;
			}
			/* Always save cmd and mode registers. */
			sc->cmd_and_mode = val32;
		}
		WR4(sc, off & ~3, val32);
	}
}
563 
564 static void
565 bcm_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
566     uint32_t val)
567 {
568 	struct bcm_sdhci_softc *sc = device_get_softc(dev);
569 	WR4(sc, off, val);
570 }
571 
/* Burst-write 'count' 32-bit words to the data port at 'off' (PIO path). */
static void
bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}
580 
/*
 * Kick off the DMA transfer for the next loaded map segment (consuming
 * dmamap_seg_index), between the SDHCI data FIFO and host memory.  Called
 * with the slot mutex held, from bcm_sdhci_start_dma for the first segment
 * and from the DMA interrupt handler for subsequent ones.
 */
static void
bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot;
	vm_paddr_t pdst, psrc;
	int err, idx, len, sync_op, width;

	slot = &sc->sc_slot;
	mtx_assert(&slot->mtx, MA_OWNED);
	idx = sc->dmamap_seg_index++;
	len = sc->dmamap_seg_sizes[idx];
	slot->offset += len;
	/* Use wide 128-bit bursts only for 16-byte-multiple lengths. */
	width = (len & 0xf ? BCM_DMA_32BIT : BCM_DMA_128BIT);

	if (slot->curcmd->data->flags & MMC_DATA_READ) {
		/*
		 * Peripherals on the AXI bus do not need DREQ pacing for reads
		 * from the ARM core, so we can safely set this to NONE.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		psrc = sc->sc_sdhci_buffer_phys;
		pdst = sc->dmamap_seg_addrs[idx];
		sync_op = BUS_DMASYNC_PREREAD;
	} else {
		/*
		 * The ordering here is important, because the last write to
		 * dst/src in the dma control block writes the real dreq value.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		bcm_dma_setup_dst(sc->sc_dma_ch, sc->conf->emmc_dreq,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		psrc = sc->dmamap_seg_addrs[idx];
		pdst = sc->sc_sdhci_buffer_phys;
		sync_op = BUS_DMASYNC_PREWRITE;
	}

	/*
	 * When starting a new DMA operation do the busdma sync operation, and
	 * disable SDCHI data interrrupts because we'll be driven by DMA
	 * interrupts (or SDHCI error interrupts) until the IO is done.
	 */
	if (idx == 0) {
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);

		slot->intmask &= ~DATA_XFER_MASK;
		bcm_sdhci_write_4(sc->sc_dev, slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask);
	}

	/*
	 * Start the DMA transfer.  Only programming errors (like failing to
	 * allocate a channel) cause a non-zero return from bcm_dma_start().
	 */
	err = bcm_dma_start(sc->sc_dma_ch, psrc, pdst, len);
	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
}
641 
642 static void
643 bcm_sdhci_dma_exit(struct bcm_sdhci_softc *sc)
644 {
645 	struct sdhci_slot *slot = &sc->sc_slot;
646 
647 	mtx_assert(&slot->mtx, MA_OWNED);
648 
649 	/* Re-enable interrupts */
650 	slot->intmask |= DATA_XFER_MASK;
651 	bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
652 	    slot->intmask);
653 }
654 
655 static void
656 bcm_sdhci_dma_unload(struct bcm_sdhci_softc *sc)
657 {
658 	struct sdhci_slot *slot = &sc->sc_slot;
659 
660 	if (sc->dmamap_seg_count == 0)
661 		return;
662 	if ((slot->curcmd->data->flags & MMC_DATA_READ) != 0)
663 		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
664 		    BUS_DMASYNC_POSTREAD);
665 	else
666 		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
667 		    BUS_DMASYNC_POSTWRITE);
668 	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);
669 
670 	sc->dmamap_seg_count = 0;
671 	sc->dmamap_seg_index = 0;
672 }
673 
/*
 * DMA channel interrupt handler: fires once per completed segment.  Either
 * chains the next segment, restarts DMA for more data, or hands the finished
 * transfer back to the sdhci framework.
 */
static void
bcm_sdhci_dma_intr(int ch, void *arg)
{
	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
	struct sdhci_slot *slot = &sc->sc_slot;
	uint32_t reg;

	mtx_lock(&slot->mtx);
	/* The command may have errored out/timed out underneath us. */
	if (slot->curcmd == NULL)
		goto out;
	/*
	 * If there are more segments for the current dma, start the next one.
	 * Otherwise unload the dma map and decide what to do next based on the
	 * status of the sdhci controller and whether there's more data left.
	 */
	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
		bcm_sdhci_start_dma_seg(sc);
		goto out;
	}

	bcm_sdhci_dma_unload(sc);

	/*
	 * If we had no further segments pending, we need to determine how to
	 * proceed next.  If the 'data/space pending' bit is already set and we
	 * can continue via DMA, do so.  Otherwise, re-enable interrupts and
	 * return.
	 */
	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS) &
	    DATA_XFER_MASK;
	if ((reg & DATA_PENDING_MASK) != 0 &&
	    BCM_SDHCI_SEGSZ_LEFT(slot) >= BCM_SDHCI_BUFFER_SIZE) {
		/* ACK any pending interrupts */
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    DATA_PENDING_MASK);

		bcm_sdhci_start_dma(slot);
		if (slot->curcmd->error != 0) {
			/* We won't recover from this error for this command. */
			bcm_sdhci_dma_unload(sc);
			bcm_sdhci_dma_exit(sc);
			sdhci_finish_data(slot);
		}
	} else if ((reg & SDHCI_INT_DATA_END) != 0) {
		/* Transfer complete: ACK status and finish the data phase. */
		bcm_sdhci_dma_exit(sc);
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    reg);
		slot->flags &= ~PLATFORM_DATA_STARTED;
		sdhci_finish_data(slot);
	} else {
		/* Residual data is below a full buffer: fall back to PIO. */
		bcm_sdhci_dma_exit(sc);
	}
out:
	mtx_unlock(&slot->mtx);
}
729 
/*
 * Begin a DMA transfer for the remaining (block-size-rounded) data of the
 * current command: load the buffer into the DMA map (bcm_sdhci_dmacb runs
 * synchronously with BUS_DMA_NOWAIT and records the segments) and start the
 * first segment.  Slot mutex must be held.
 */
static void
bcm_sdhci_start_dma(struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
	uint8_t *buf;
	size_t left;

	mtx_assert(&slot->mtx, MA_OWNED);

	left = BCM_SDHCI_SEGSZ_LEFT(slot);
	buf = (uint8_t *)slot->curcmd->data->data + slot->offset;
	KASSERT(left != 0,
	    ("%s: DMA handling incorrectly indicated", __func__));

	/*
	 * No need to check segment count here; if we've not yet unloaded
	 * previous segments, we'll catch that in bcm_sdhci_dmacb.
	 */
	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buf, left,
	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
	    sc->dmamap_status != 0) {
		slot->curcmd->error = MMC_ERR_NO_MEMORY;
		return;
	}

	/* DMA start */
	bcm_sdhci_start_dma_seg(sc);
}
758 
759 static int
760 bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
761 {
762 #ifdef INVARIANTS
763 	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
764 #endif
765 
766 	/*
767 	 * We don't want to perform DMA in this context -- interrupts are
768 	 * disabled, and a transaction may already be in progress.
769 	 */
770 	if (dumping)
771 		return (0);
772 
773 	/*
774 	 * This indicates that we somehow let a data interrupt slip by into the
775 	 * SDHCI framework, when it should not have.  This really needs to be
776 	 * caught and fixed ASAP, as it really shouldn't happen.
777 	 */
778 	KASSERT(sc->dmamap_seg_count == 0,
779 	    ("data pending interrupt pushed through SDHCI framework"));
780 
781 	/*
782 	 * Do not use DMA for transfers less than our block size.  Checking
783 	 * alignment serves little benefit, as we round transfer sizes down to
784 	 * a multiple of the block size and push the transfer back to
785 	 * SDHCI-driven PIO once we're below the block size.
786 	 */
787 	if (BCM_SDHCI_SEGSZ_LEFT(slot) < BCM_DMA_BLOCK_SIZE)
788 		return (0);
789 
790 	return (1);
791 }
792 
/*
 * sdhci platform hook: kick off the platform-driven transfer once the
 * framework has prepared the command; *intmask is left untouched since DMA
 * setup masks the data interrupts itself.
 */
static void
bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot,
    uint32_t *intmask)
{

	/* DMA transfer FIFO 1KB */
	bcm_sdhci_start_dma(slot);
}
801 
802 static void
803 bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
804 {
805 	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
806 
807 	/*
808 	 * Clean up.  Interrupts are clearly enabled, because we received an
809 	 * SDHCI_INT_DATA_END to get this far -- just make sure we don't leave
810 	 * anything laying around.
811 	 */
812 	if (sc->dmamap_seg_count != 0) {
813 		/*
814 		 * Our segment math should have worked out such that we would
815 		 * never finish the transfer without having used up all of the
816 		 * segments.  If we haven't, that means we must have erroneously
817 		 * regressed to SDHCI-driven PIO to finish the operation and
818 		 * this is certainly caused by developer-error.
819 		 */
820 		bcm_sdhci_dma_unload(sc);
821 	}
822 
823 	sdhci_finish_data(slot);
824 }
825 
/* newbus method table: device, bus, mmcbr, and sdhci accessor interfaces. */
static device_method_t bcm_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bcm_sdhci_probe),
	DEVMETHOD(device_attach,	bcm_sdhci_attach),
	DEVMETHOD(device_detach,	bcm_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar,	sdhci_generic_write_ivar),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	bcm_sdhci_update_ios),
	DEVMETHOD(mmcbr_request,	sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro,		bcm_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host,	sdhci_generic_release_host),

	/* Platform transfer methods (custom DMA engine instead of SDMA/ADMA) */
	DEVMETHOD(sdhci_platform_will_handle,		bcm_sdhci_will_handle_transfer),
	DEVMETHOD(sdhci_platform_start_transfer,	bcm_sdhci_start_transfer),
	DEVMETHOD(sdhci_platform_finish_transfer,	bcm_sdhci_finish_transfer),
	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1,		bcm_sdhci_read_1),
	DEVMETHOD(sdhci_read_2,		bcm_sdhci_read_2),
	DEVMETHOD(sdhci_read_4,		bcm_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4,	bcm_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1,	bcm_sdhci_write_1),
	DEVMETHOD(sdhci_write_2,	bcm_sdhci_write_2),
	DEVMETHOD(sdhci_write_4,	bcm_sdhci_write_4),
	DEVMETHOD(sdhci_write_multi_4,	bcm_sdhci_write_multi_4),

	DEVMETHOD_END
};
860 
static devclass_t bcm_sdhci_devclass;

/* Driver declaration: attaches under simplebus as "sdhci_bcm". */
static driver_t bcm_sdhci_driver = {
	"sdhci_bcm",
	bcm_sdhci_methods,
	sizeof(struct bcm_sdhci_softc),
};

DRIVER_MODULE(sdhci_bcm, simplebus, bcm_sdhci_driver, bcm_sdhci_devclass,
    NULL, NULL);
#ifdef NOTYET
MODULE_DEPEND(sdhci_bcm, bcm2835_clkman, 1, 1, 1);
#endif
/* Require the generic sdhci(4) module. */
SDHCI_DEPEND(sdhci_bcm);
/* Register the mmc/mmcsd bridge unless the CAM-based MMC stack is in use. */
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(sdhci_bcm);
#endif
878