/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Oleksandr Tymoshenko <gonzo@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmc_fdt_helpers.h>

#include <dev/sdhci/sdhci.h>

#include "mmcbr_if.h"
#include "sdhci_if.h"

#include "opt_mmccam.h"

#include "bcm2835_dma.h"
#include <arm/broadcom/bcm2835/bcm2835_mbox_prop.h>
#ifdef NOTYET
#include <arm/broadcom/bcm2835/bcm2835_clkman.h>
#endif
#include <arm/broadcom/bcm2835/bcm2835_vcbus.h>

#define	BCM2835_DEFAULT_SDHCI_FREQ	50
#define	BCM2838_DEFAULT_SDHCI_FREQ	100

#define	BCM_SDHCI_BUFFER_SIZE		512
/*
 * NUM_DMA_SEGS is the number of DMA segments we want to accommodate on
 * average.  We add extra segments to cover spilling into another segment
 * when a transfer crosses a page boundary; e.g., a transfer of up to
 * PAGE_SIZE bytes needs one extra segment because it can cross a page
 * boundary at most once.
 */
#define	NUM_DMA_SEGS			1
#define	NUM_DMA_SPILL_SEGS		\
	((((NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE) - 1) / PAGE_SIZE) + 1)
#define	ALLOCATED_DMA_SEGS		(NUM_DMA_SEGS + NUM_DMA_SPILL_SEGS)
#define	BCM_DMA_MAXSIZE			(NUM_DMA_SEGS * BCM_SDHCI_BUFFER_SIZE)
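/*
 * Worked example: with NUM_DMA_SEGS = 1, BCM_SDHCI_BUFFER_SIZE = 512 and
 * PAGE_SIZE >= 512, NUM_DMA_SPILL_SEGS evaluates to ((512 - 1) / PAGE_SIZE)
 * + 1 = 1, so ALLOCATED_DMA_SEGS = 2 and BCM_DMA_MAXSIZE = 512 bytes per
 * DMA program.
 */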

#define	BCM_SDHCI_SLOT_LEFT(slot)	\
	((slot)->curcmd->data->len - (slot)->offset)

#define	BCM_SDHCI_SEGSZ_LEFT(slot)	\
	min(BCM_DMA_MAXSIZE,		\
	    rounddown(BCM_SDHCI_SLOT_LEFT(slot), BCM_SDHCI_BUFFER_SIZE))
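/*
 * BCM_SDHCI_SLOT_LEFT() is the number of bytes still outstanding in the
 * current data command; BCM_SDHCI_SEGSZ_LEFT() is the portion the next DMA
 * program may move, rounded down to whole 512-byte FIFO blocks and capped
 * at BCM_DMA_MAXSIZE.
 */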

#define	DATA_PENDING_MASK	(SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)
#define	DATA_XFER_MASK		(DATA_PENDING_MASK | SDHCI_INT_DATA_END)
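/*
 * Interrupt bits that are masked off in SDHCI_SIGNAL_ENABLE while a DMA
 * transfer is in flight and restored once it completes; see
 * bcm_sdhci_start_dma_seg() and bcm_sdhci_dma_exit() below.
 */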

#ifdef DEBUG
static int bcm2835_sdhci_debug = 0;

TUNABLE_INT("hw.bcm2835.sdhci.debug", &bcm2835_sdhci_debug);
SYSCTL_INT(_hw_sdhci, OID_AUTO, bcm2835_sdhci_debug, CTLFLAG_RWTUN,
    &bcm2835_sdhci_debug, 0, "bcm2835 SDHCI debug level");

#define	dprintf(fmt, args...)					\
	do {							\
		if (bcm2835_sdhci_debug)			\
			printf("%s: " fmt, __func__, ##args);	\
	}  while (0)
#else
#define dprintf(fmt, args...)
#endif

static int bcm2835_sdhci_hs = 1;
static int bcm2835_sdhci_pio_mode = 0;

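/*
 * Per-controller configuration: the firmware clock ID to query, an optional
 * clock-manager source, the fallback base frequency (in MHz), SDHCI quirks,
 * and the DREQ line used to pace DMA writes toward the controller FIFO.
 */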
struct bcm_mmc_conf {
	int	clock_id;
	int	clock_src;
	int	default_freq;
	int	quirks;
	int	emmc_dreq;
};

struct bcm_mmc_conf bcm2835_sdhci_conf = {
	.clock_id	= BCM2835_MBOX_CLOCK_ID_EMMC,
	.clock_src	= -1,
	.default_freq	= BCM2835_DEFAULT_SDHCI_FREQ,
	.quirks		= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
	    SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_DONT_SET_HISPD_BIT |
	    SDHCI_QUIRK_MISSING_CAPS,
	.emmc_dreq	= BCM_DMA_DREQ_EMMC,
};

struct bcm_mmc_conf bcm2838_emmc2_conf = {
	.clock_id	= BCM2838_MBOX_CLOCK_ID_EMMC2,
	.clock_src	= -1,
	.default_freq	= BCM2838_DEFAULT_SDHCI_FREQ,
	.quirks		= 0,
	.emmc_dreq	= BCM_DMA_DREQ_NONE,
};

static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-sdhci",	(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-sdhci",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2835-mmc",		(uintptr_t)&bcm2835_sdhci_conf},
	{"brcm,bcm2711-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{"brcm,bcm2838-emmc2",		(uintptr_t)&bcm2838_emmc2_conf},
	{NULL,				0}
};

TUNABLE_INT("hw.bcm2835.sdhci.hs", &bcm2835_sdhci_hs);
TUNABLE_INT("hw.bcm2835.sdhci.pio_mode", &bcm2835_sdhci_pio_mode);

struct bcm_sdhci_softc {
	device_t		sc_dev;
	struct resource *	sc_mem_res;
	struct resource *	sc_irq_res;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	void *			sc_intrhand;
	struct mmc_request *	sc_req;
	struct sdhci_slot	sc_slot;
	struct mmc_helper	sc_mmc_helper;
	int			sc_dma_ch;
	bus_dma_tag_t		sc_dma_tag;
	bus_dmamap_t		sc_dma_map;
	vm_paddr_t		sc_sdhci_buffer_phys;
	bus_addr_t		dmamap_seg_addrs[ALLOCATED_DMA_SEGS];
	bus_size_t		dmamap_seg_sizes[ALLOCATED_DMA_SEGS];
	int			dmamap_seg_count;
	int			dmamap_seg_index;
	int			dmamap_status;
	uint32_t		blksz_and_count;
	uint32_t		cmd_and_mode;
	bool			need_update_blk;
#ifdef NOTYET
	device_t		clkman;
#endif
	struct bcm_mmc_conf *	conf;
};

static int bcm_sdhci_probe(device_t);
static int bcm_sdhci_attach(device_t);
static int bcm_sdhci_detach(device_t);
static void bcm_sdhci_intr(void *);

static int bcm_sdhci_get_ro(device_t, device_t);
static void bcm_sdhci_dma_intr(int ch, void *arg);
static void bcm_sdhci_start_dma(struct sdhci_slot *slot);

static void
bcm_sdhci_dmacb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	struct bcm_sdhci_softc *sc = arg;
	int i;

	/* Sanity check: we can only ever have one mapping at a time. */
	KASSERT(sc->dmamap_seg_count == 0, ("leaked DMA segment"));
	sc->dmamap_status = err;
	sc->dmamap_seg_count = nseg;

	/* Note nseg is guaranteed to be zero if err is non-zero. */
	for (i = 0; i < nseg; i++) {
		sc->dmamap_seg_addrs[i] = segs[i].ds_addr;
		sc->dmamap_seg_sizes[i] = segs[i].ds_len;
	}
}

static int
bcm_sdhci_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Broadcom 2708 SDHCI controller");

	return (BUS_PROBE_DEFAULT);
}

static int
bcm_sdhci_attach(device_t dev)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	int rid, err;
	phandle_t node;
	pcell_t cell;
	u_int default_freq;

	sc->sc_dev = dev;
	sc->sc_req = NULL;

	sc->conf = (struct bcm_mmc_conf *)ofw_bus_search_compatible(dev,
	    compat_data)->ocd_data;
	if (sc->conf == 0)
	    return (ENXIO);

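	/*
	 * Power up the controller and query its base clock through the
	 * VideoCore firmware mailbox before touching any SDHCI registers.
	 */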
	err = bcm2835_mbox_set_power_state(BCM2835_MBOX_POWER_ID_EMMC, TRUE);
	if (err != 0) {
		if (bootverbose)
			device_printf(dev, "Unable to enable the power\n");
		return (err);
	}

	default_freq = 0;
	err = bcm2835_mbox_get_clock_rate(sc->conf->clock_id, &default_freq);
	if (err == 0) {
		/* Convert to MHz */
		default_freq /= 1000000;
	}
	if (default_freq == 0) {
		node = ofw_bus_get_node(sc->sc_dev);
		if ((OF_getencprop(node, "clock-frequency", &cell,
		    sizeof(cell))) > 0)
			default_freq = cell / 1000000;
	}
	if (default_freq == 0)
		default_freq = sc->conf->default_freq;

	if (bootverbose)
		device_printf(dev, "SDHCI frequency: %dMHz\n", default_freq);
#ifdef NOTYET
	if (sc->conf->clock_src > 0) {
		uint32_t f;
		sc->clkman = devclass_get_device(
		    devclass_find("bcm2835_clkman"), 0);
		if (sc->clkman == NULL) {
			device_printf(dev, "cannot find Clock Manager\n");
			return (ENXIO);
		}

		f = bcm2835_clkman_set_frequency(sc->clkman,
		    sc->conf->clock_src, default_freq);
		if (f == 0)
			return (EINVAL);

		if (bootverbose)
			device_printf(dev, "Clock source frequency: %dMHz\n",
			    f);
	}
#endif

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		err = ENXIO;
		goto fail;
	}

	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->sc_irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		err = ENXIO;
		goto fail;
	}

	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, bcm_sdhci_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "cannot setup interrupt handler\n");
		err = ENXIO;
		goto fail;
	}

	if (!bcm2835_sdhci_pio_mode)
		sc->sc_slot.opt = SDHCI_PLATFORM_TRANSFER;

	sc->sc_slot.caps = SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180;
	if (bcm2835_sdhci_hs)
		sc->sc_slot.caps |= SDHCI_CAN_DO_HISPD;
	sc->sc_slot.caps |= (default_freq << SDHCI_CLOCK_BASE_SHIFT);
	sc->sc_slot.quirks = sc->conf->quirks;

	sdhci_init_slot(dev, &sc->sc_slot, 0);
	mmc_fdt_parse(dev, 0, &sc->sc_mmc_helper, &sc->sc_slot.host);

	sc->sc_dma_ch = bcm_dma_allocate(BCM_DMA_CH_ANY);
	if (sc->sc_dma_ch == BCM_DMA_CH_INVALID)
		goto fail;

	err = bcm_dma_setup_intr(sc->sc_dma_ch, bcm_sdhci_dma_intr, sc);
	if (err != 0) {
		device_printf(dev,
		    "cannot setup dma interrupt handler\n");
		err = ENXIO;
		goto fail;
	}

	/* Allocate bus_dma resources. */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, bcm283x_dmabus_peripheral_lowaddr(),
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    BCM_DMA_MAXSIZE, ALLOCATED_DMA_SEGS, BCM_SDHCI_BUFFER_SIZE,
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed to allocate DMA tag");
		goto fail;
	}

	err = bus_dmamap_create(sc->sc_dma_tag, 0, &sc->sc_dma_map);
	if (err) {
		device_printf(dev, "bus_dmamap_create failed\n");
		goto fail;
	}

	/* FIXME: Fix along with other BUS_SPACE_PHYSADDR instances */
	sc->sc_sdhci_buffer_phys = rman_get_start(sc->sc_mem_res) +
	    SDHCI_BUFFER;

	bus_identify_children(dev);
	bus_attach_children(dev);

	sdhci_start_slot(&sc->sc_slot);

	/* Seed our copies. */
	sc->blksz_and_count = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_BLOCK_SIZE);
	sc->cmd_and_mode = SDHCI_READ_4(dev, &sc->sc_slot, SDHCI_TRANSFER_MODE);

	return (0);

fail:
	if (sc->sc_intrhand)
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand);
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);

	return (err);
}

static int
bcm_sdhci_detach(device_t dev)
{

	return (EBUSY);
}

static void
bcm_sdhci_intr(void *arg)
{
	struct bcm_sdhci_softc *sc = arg;

	sdhci_generic_intr(&sc->sc_slot);
}

static int
bcm_sdhci_update_ios(device_t bus, device_t child)
{
	struct bcm_sdhci_softc *sc;
	struct mmc_ios *ios;
	int rv;

	sc = device_get_softc(bus);
	ios = &sc->sc_slot.host.ios;

	if (ios->power_mode == power_up) {
		if (sc->sc_mmc_helper.vmmc_supply)
			regulator_enable(sc->sc_mmc_helper.vmmc_supply);
		if (sc->sc_mmc_helper.vqmmc_supply)
			regulator_enable(sc->sc_mmc_helper.vqmmc_supply);
	}

	rv = sdhci_generic_update_ios(bus, child);
	if (rv != 0)
		return (rv);

	if (ios->power_mode == power_off) {
		if (sc->sc_mmc_helper.vmmc_supply)
			regulator_disable(sc->sc_mmc_helper.vmmc_supply);
		if (sc->sc_mmc_helper.vqmmc_supply)
			regulator_disable(sc->sc_mmc_helper.vqmmc_supply);
	}

	return (0);
}

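/*
 * No write-protect line is wired up on these boards, so always report the
 * medium as writable.
 */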
static int
bcm_sdhci_get_ro(device_t bus, device_t child)
{

	return (0);
}

static inline uint32_t
RD4(struct bcm_sdhci_softc *sc, bus_size_t off)
{
	uint32_t val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
	return val;
}

static inline void
WR4(struct bcm_sdhci_softc *sc, bus_size_t off, uint32_t val)
{

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, val);
	/*
	 * The Arasan HC has a bug where it may lose the content of
	 * consecutive writes to registers that are within two SD-card
	 * clock cycles of each other (a clock domain crossing problem).
	 */
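	/*
	 * Worked example: at the 400 kHz identification clock the delay
	 * below is (2 * 1000000) / 400000 + 1 = 6 us; at 50 MHz the integer
	 * division truncates to 0 and the "+ 1" still enforces a 1 us gap.
	 */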
	if (sc->sc_slot.clock > 0)
		DELAY(((2 * 1000000) / sc->sc_slot.clock) + 1);
}

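/*
 * This controller is only accessed with aligned 32-bit reads and writes,
 * so the 8- and 16-bit SDHCI accessors below operate on the containing
 * 32-bit word: reads shift and mask, writes do read-modify-write cycles
 * (with extra shadowing of the block and command/transfer-mode registers).
 */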
static uint8_t
bcm_sdhci_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val = RD4(sc, off & ~3);

	return ((val >> (off & 3)*8) & 0xff);
}

static uint16_t
bcm_sdhci_read_2(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32;

	/*
	 * Standard 32-bit handling of command and transfer mode, as
	 * well as block size and count.
	 */
	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
	    sc->need_update_blk)
		val32 = sc->blksz_and_count;
	else if (off == SDHCI_TRANSFER_MODE || off == SDHCI_COMMAND_FLAGS)
		val32 = sc->cmd_and_mode;
	else
		val32 = RD4(sc, off & ~3);

	return ((val32 >> (off & 3)*8) & 0xffff);
}

static uint32_t
bcm_sdhci_read_4(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	return RD4(sc, off);
}

static void
bcm_sdhci_read_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_read_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}

static void
bcm_sdhci_write_1(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint8_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32 = RD4(sc, off & ~3);
	val32 &= ~(0xff << (off & 3)*8);
	val32 |= (val << (off & 3)*8);
	WR4(sc, off & ~3, val32);
}

static void
bcm_sdhci_write_2(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint16_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	uint32_t val32;

	/*
	 * If we have a queued-up 16-bit value for block size or count, use
	 * and update the saved value rather than doing any real register
	 * access.  If we have not touched either since the last write, read
	 * from the register, as at least the block count can change.
	 * Similarly, if we are about to issue a command, always use the saved
	 * value for transfer mode, as we can never write that without issuing
	 * a command.
	 */
	if ((off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) &&
	    sc->need_update_blk)
		val32 = sc->blksz_and_count;
	else if (off == SDHCI_COMMAND_FLAGS)
		val32 = sc->cmd_and_mode;
	else
		val32 = RD4(sc, off & ~3);

	val32 &= ~(0xffff << (off & 3)*8);
	val32 |= (val << (off & 3)*8);

	if (off == SDHCI_TRANSFER_MODE)
		sc->cmd_and_mode = val32;
	else if (off == SDHCI_BLOCK_SIZE || off == SDHCI_BLOCK_COUNT) {
		sc->blksz_and_count = val32;
		sc->need_update_blk = true;
	} else {
		if (off == SDHCI_COMMAND_FLAGS) {
			/* If we saved blk writes, do them now before cmd. */
			if (sc->need_update_blk) {
				WR4(sc, SDHCI_BLOCK_SIZE, sc->blksz_and_count);
				sc->need_update_blk = false;
			}
			/* Always save cmd and mode registers. */
			sc->cmd_and_mode = val32;
		}
		WR4(sc, off & ~3, val32);
	}
}

static void
bcm_sdhci_write_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t val)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);
	WR4(sc, off, val);
}

static void
bcm_sdhci_write_multi_4(device_t dev, struct sdhci_slot *slot, bus_size_t off,
    uint32_t *data, bus_size_t count)
{
	struct bcm_sdhci_softc *sc = device_get_softc(dev);

	bus_space_write_multi_4(sc->sc_bst, sc->sc_bsh, off, data, count);
}

static void
bcm_sdhci_start_dma_seg(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot;
	vm_paddr_t pdst, psrc;
	int err __diagused, idx, len, sync_op, width;

	slot = &sc->sc_slot;
	mtx_assert(&slot->mtx, MA_OWNED);
	idx = sc->dmamap_seg_index++;
	len = sc->dmamap_seg_sizes[idx];
	slot->offset += len;
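	/*
	 * Use wide 128-bit DMA accesses when the segment length is a
	 * multiple of 16 bytes; otherwise fall back to 32-bit accesses.
	 */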
	width = (len & 0xf ? BCM_DMA_32BIT : BCM_DMA_128BIT);

	if (slot->curcmd->data->flags & MMC_DATA_READ) {
		/*
		 * Peripherals on the AXI bus do not need DREQ pacing for reads
		 * from the ARM core, so we can safely set this to NONE.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		bcm_dma_setup_dst(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		psrc = sc->sc_sdhci_buffer_phys;
		pdst = sc->dmamap_seg_addrs[idx];
		sync_op = BUS_DMASYNC_PREREAD;
	} else {
		/*
		 * The ordering here is important, because the last write to
		 * dst/src in the dma control block writes the real dreq value.
		 */
		bcm_dma_setup_src(sc->sc_dma_ch, BCM_DMA_DREQ_NONE,
		    BCM_DMA_INC_ADDR, width);
		bcm_dma_setup_dst(sc->sc_dma_ch, sc->conf->emmc_dreq,
		    BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
		psrc = sc->dmamap_seg_addrs[idx];
		pdst = sc->sc_sdhci_buffer_phys;
		sync_op = BUS_DMASYNC_PREWRITE;
	}

	/*
	 * When starting a new DMA operation do the busdma sync operation, and
	 * disable SDHCI data interrupts because we'll be driven by DMA
	 * interrupts (or SDHCI error interrupts) until the IO is done.
	 */
	if (idx == 0) {
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map, sync_op);

		slot->intmask &= ~DATA_XFER_MASK;
		bcm_sdhci_write_4(sc->sc_dev, slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask);
	}

	/*
	 * Start the DMA transfer.  Only programming errors (like failing to
	 * allocate a channel) cause a non-zero return from bcm_dma_start().
	 */
	err = bcm_dma_start(sc->sc_dma_ch, psrc, pdst, len);
	KASSERT((err == 0), ("bcm2835_sdhci: failed DMA start"));
}

static void
bcm_sdhci_dma_exit(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	mtx_assert(&slot->mtx, MA_OWNED);

	/* Re-enable interrupts */
	slot->intmask |= DATA_XFER_MASK;
	bcm_sdhci_write_4(slot->bus, slot, SDHCI_SIGNAL_ENABLE,
	    slot->intmask);
}

static void
bcm_sdhci_dma_unload(struct bcm_sdhci_softc *sc)
{
	struct sdhci_slot *slot = &sc->sc_slot;

	if (sc->dmamap_seg_count == 0)
		return;
	if ((slot->curcmd->data->flags & MMC_DATA_READ) != 0)
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTREAD);
	else
		bus_dmamap_sync(sc->sc_dma_tag, sc->sc_dma_map,
		    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dma_tag, sc->sc_dma_map);

	sc->dmamap_seg_count = 0;
	sc->dmamap_seg_index = 0;
}

static void
bcm_sdhci_dma_intr(int ch, void *arg)
{
	struct bcm_sdhci_softc *sc = (struct bcm_sdhci_softc *)arg;
	struct sdhci_slot *slot = &sc->sc_slot;
	uint32_t reg;

	mtx_lock(&slot->mtx);
	if (slot->curcmd == NULL)
		goto out;
	/*
	 * If there are more segments for the current dma, start the next one.
	 * Otherwise unload the dma map and decide what to do next based on the
	 * status of the sdhci controller and whether there's more data left.
	 */
	if (sc->dmamap_seg_index < sc->dmamap_seg_count) {
		bcm_sdhci_start_dma_seg(sc);
		goto out;
	}

	bcm_sdhci_dma_unload(sc);

	/*
	 * If we had no further segments pending, we need to determine how to
	 * proceed next.  If the 'data/space pending' bit is already set and we
	 * can continue via DMA, do so.  Otherwise, re-enable interrupts and
	 * return.
	 */
	reg = bcm_sdhci_read_4(slot->bus, slot, SDHCI_INT_STATUS) &
	    DATA_XFER_MASK;
	if ((reg & DATA_PENDING_MASK) != 0 &&
	    BCM_SDHCI_SEGSZ_LEFT(slot) >= BCM_SDHCI_BUFFER_SIZE) {
		/* ACK any pending interrupts */
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    DATA_PENDING_MASK);

		bcm_sdhci_start_dma(slot);
		if (slot->curcmd->error != 0) {
			/* We won't recover from this error for this command. */
			bcm_sdhci_dma_unload(sc);
			bcm_sdhci_dma_exit(sc);
			sdhci_finish_data(slot);
		}
	} else if ((reg & SDHCI_INT_DATA_END) != 0) {
		bcm_sdhci_dma_exit(sc);
		bcm_sdhci_write_4(slot->bus, slot, SDHCI_INT_STATUS,
		    reg);
		slot->flags &= ~PLATFORM_DATA_STARTED;
		sdhci_finish_data(slot);
	} else {
		bcm_sdhci_dma_exit(sc);
	}
out:
	mtx_unlock(&slot->mtx);
}

static void
bcm_sdhci_start_dma(struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
	uint8_t *buf;
	size_t left;

	mtx_assert(&slot->mtx, MA_OWNED);

	left = BCM_SDHCI_SEGSZ_LEFT(slot);
	buf = (uint8_t *)slot->curcmd->data->data + slot->offset;
	KASSERT(left != 0,
	    ("%s: DMA handling incorrectly indicated", __func__));

	/*
	 * No need to check segment count here; if we've not yet unloaded
	 * previous segments, we'll catch that in bcm_sdhci_dmacb.
	 */
	if (bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, buf, left,
	    bcm_sdhci_dmacb, sc, BUS_DMA_NOWAIT) != 0 ||
	    sc->dmamap_status != 0) {
		slot->curcmd->error = MMC_ERR_NO_MEMORY;
		return;
	}

	/* DMA start */
	bcm_sdhci_start_dma_seg(sc);
}

static int
bcm_sdhci_will_handle_transfer(device_t dev, struct sdhci_slot *slot)
{
#ifdef INVARIANTS
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);
#endif

	/*
	 * We don't want to perform DMA in this context -- interrupts are
	 * disabled, and a transaction may already be in progress.
	 */
	if (dumping)
		return (0);

	/*
	 * This indicates that we somehow let a data interrupt slip through
	 * into the SDHCI framework when it should not have.  That needs to
	 * be caught and fixed promptly, as it should never happen.
	 */
	KASSERT(sc->dmamap_seg_count == 0,
	    ("data pending interrupt pushed through SDHCI framework"));

	/*
	 * Do not use DMA for transfers less than our block size.  Checking
	 * alignment serves little benefit, as we round transfer sizes down to
	 * a multiple of the block size and push the transfer back to
	 * SDHCI-driven PIO once we're below the block size.
	 */
	if (BCM_SDHCI_SEGSZ_LEFT(slot) < BCM_DMA_BLOCK_SIZE)
		return (0);

	return (1);
}

static void
bcm_sdhci_start_transfer(device_t dev, struct sdhci_slot *slot,
    uint32_t *intmask)
{

	/* DMA transfer FIFO 1KB */
	bcm_sdhci_start_dma(slot);
}

static void
bcm_sdhci_finish_transfer(device_t dev, struct sdhci_slot *slot)
{
	struct bcm_sdhci_softc *sc = device_get_softc(slot->bus);

	/*
	 * Clean up.  Interrupts are clearly enabled, because we received an
	 * SDHCI_INT_DATA_END to get this far -- just make sure we don't leave
	 * anything lying around.
	 */
	if (sc->dmamap_seg_count != 0) {
		/*
		 * Our segment math should have worked out such that we would
		 * never finish the transfer without having used up all of the
		 * segments.  If we haven't, that means we must have erroneously
		 * regressed to SDHCI-driven PIO to finish the operation, and
		 * that is certainly a developer error.
		 */
		bcm_sdhci_dma_unload(sc);
	}

	sdhci_finish_data(slot);
}

static device_method_t bcm_sdhci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bcm_sdhci_probe),
	DEVMETHOD(device_attach,	bcm_sdhci_attach),
	DEVMETHOD(device_detach,	bcm_sdhci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	sdhci_generic_read_ivar),
	DEVMETHOD(bus_write_ivar,	sdhci_generic_write_ivar),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	bcm_sdhci_update_ios),
	DEVMETHOD(mmcbr_request,	sdhci_generic_request),
	DEVMETHOD(mmcbr_get_ro,		bcm_sdhci_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	sdhci_generic_acquire_host),
	DEVMETHOD(mmcbr_release_host,	sdhci_generic_release_host),

	/* Platform transfer methods */
	DEVMETHOD(sdhci_platform_will_handle,		bcm_sdhci_will_handle_transfer),
	DEVMETHOD(sdhci_platform_start_transfer,	bcm_sdhci_start_transfer),
	DEVMETHOD(sdhci_platform_finish_transfer,	bcm_sdhci_finish_transfer),
	/* SDHCI registers accessors */
	DEVMETHOD(sdhci_read_1,		bcm_sdhci_read_1),
	DEVMETHOD(sdhci_read_2,		bcm_sdhci_read_2),
	DEVMETHOD(sdhci_read_4,		bcm_sdhci_read_4),
	DEVMETHOD(sdhci_read_multi_4,	bcm_sdhci_read_multi_4),
	DEVMETHOD(sdhci_write_1,	bcm_sdhci_write_1),
	DEVMETHOD(sdhci_write_2,	bcm_sdhci_write_2),
	DEVMETHOD(sdhci_write_4,	bcm_sdhci_write_4),
	DEVMETHOD(sdhci_write_multi_4,	bcm_sdhci_write_multi_4),

	DEVMETHOD_END
};

static driver_t bcm_sdhci_driver = {
	"sdhci_bcm",
	bcm_sdhci_methods,
	sizeof(struct bcm_sdhci_softc),
};

DRIVER_MODULE(sdhci_bcm, simplebus, bcm_sdhci_driver, NULL, NULL);
#ifdef NOTYET
MODULE_DEPEND(sdhci_bcm, bcm2835_clkman, 1, 1, 1);
#endif
SDHCI_DEPEND(sdhci_bcm);
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(sdhci_bcm);
#endif